mirror of https://github.com/apache/cloudstack.git
Merge branch 'main' into cks-enhancements-upstream
commit 0f79583466
@@ -236,7 +236,7 @@ jobs:
- name: Install Python dependencies
run: |
python3 -m pip install --user --upgrade urllib3 lxml paramiko nose texttable ipmisim pyopenssl pycrypto mock flask netaddr pylint pycodestyle six astroid pynose
python3 -m pip install --user --upgrade urllib3 lxml paramiko nose texttable ipmisim pyopenssl pycryptodome mock flask netaddr pylint pycodestyle six astroid pynose
- name: Install jacoco dependencies
run: |
@@ -441,3 +441,9 @@ iscsi.session.cleanup.enabled=false
# Wait(in seconds) during agent reconnections. When no value is set then default value of 5s will be used
#backoff.seconds=
# Timeout (in seconds) to wait for the snapshot reversion to complete.
# revert.snapshot.timeout=10800
# Timeout (in seconds) to wait for the incremental snapshot to complete.
# incremental.snapshot.timeout=10800
@@ -818,6 +818,16 @@ public class AgentProperties{
*/
public static final Property<Integer> SSL_HANDSHAKE_TIMEOUT = new Property<>("ssl.handshake.timeout", 30, Integer.class);
/**
* Timeout (in seconds) to wait for the incremental snapshot to complete.
* */
public static final Property<Integer> INCREMENTAL_SNAPSHOT_TIMEOUT = new Property<>("incremental.snapshot.timeout", 10800);
/**
* Timeout (in seconds) to wait for the snapshot reversion to complete.
* */
public static final Property<Integer> REVERT_SNAPSHOT_TIMEOUT = new Property<>("revert.snapshot.timeout", 10800);
public static class Property <T>{
private String name;
private T defaultValue;
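Illustrative usage sketch (not part of this commit): the two new constants pair the agent.properties keys shown earlier with their 10800-second (3 hour) defaults. The sketch assumes the existing AgentPropertiesFileHandler.getPropertyValue helper is how these constants get resolved, as with other agent properties; the standalone main method and printed output are purely for illustration.

import com.cloud.agent.properties.AgentProperties;
import com.cloud.agent.properties.AgentPropertiesFileHandler;

public class SnapshotTimeoutDefaultsSketch {
    public static void main(String[] args) {
        // Resolves to the declared defaults (10800 seconds) while the
        // corresponding keys stay commented out in agent.properties.
        Integer incremental = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.INCREMENTAL_SNAPSHOT_TIMEOUT);
        Integer revert = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.REVERT_SNAPSHOT_TIMEOUT);
        System.out.println("incremental.snapshot.timeout=" + incremental + ", revert.snapshot.timeout=" + revert);
    }
}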
@@ -62,7 +62,7 @@ public interface DeploymentClusterPlanner extends DeploymentPlanner {
"vm.allocation.algorithm",
"Advanced",
"random",
"Order in which hosts within a cluster will be considered for VM/volume allocation. The value can be 'random', 'firstfit', 'userdispersing', 'userconcentratedpod_random', 'userconcentratedpod_firstfit', or 'firstfitleastconsumed'.",
"Order in which hosts within a cluster will be considered for VM allocation. The value can be 'random', 'firstfit', 'userdispersing', 'userconcentratedpod_random', 'userconcentratedpod_firstfit', or 'firstfitleastconsumed'.",
true,
ConfigKey.Scope.Global, null, null, null, null, null,
ConfigKey.Kind.Select,
@@ -48,7 +48,7 @@ public interface Snapshot extends ControlledEntity, Identity, InternalIdentity,
}
public enum State {
Allocated, Creating, CreatedOnPrimary, BackingUp, BackedUp, Copying, Destroying, Destroyed,
Allocated, Creating, CreatedOnPrimary, BackingUp, BackedUp, Copying, Destroying, Destroyed, Hidden,
//it's a state, user can't see the snapshot from ui, while the snapshot may still exist on the storage
Error;
@@ -0,0 +1,36 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.cloud.agent.api;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
public class ConvertSnapshotAnswer extends Answer {
private SnapshotObjectTO snapshotObjectTO;
public ConvertSnapshotAnswer(SnapshotObjectTO snapshotObjectTO) {
super(null);
this.snapshotObjectTO = snapshotObjectTO;
}
public SnapshotObjectTO getSnapshotObjectTO() {
return snapshotObjectTO;
}
}
@@ -0,0 +1,42 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.cloud.agent.api;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
public class ConvertSnapshotCommand extends Command {
public static final String TEMP_SNAPSHOT_NAME = "_temp";
SnapshotObjectTO snapshotObjectTO;
public SnapshotObjectTO getSnapshotObjectTO() {
return snapshotObjectTO;
}
public ConvertSnapshotCommand(SnapshotObjectTO snapshotObjectTO) {
this.snapshotObjectTO = snapshotObjectTO;
}
@Override
public boolean executeInSequence() {
return true;
}
}
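Illustrative usage sketch (not part of this commit): ConvertSnapshotCommand wraps a SnapshotObjectTO describing the snapshot to convert, and the matching ConvertSnapshotAnswer carries the resulting SnapshotObjectTO back. The no-argument SnapshotObjectTO constructor and the direct answer construction below are illustrative assumptions; in the real flow the TO is built from a SnapshotInfo and the answer is produced by the agent-side resource.

import com.cloud.agent.api.ConvertSnapshotAnswer;
import com.cloud.agent.api.ConvertSnapshotCommand;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;

public class ConvertSnapshotUsageSketch {
    public static void main(String[] args) {
        SnapshotObjectTO snapshotTO = new SnapshotObjectTO(); // fields would be populated from a SnapshotInfo
        ConvertSnapshotCommand cmd = new ConvertSnapshotCommand(snapshotTO);

        // The agent resource would execute cmd and reply with an answer; it is
        // constructed directly here only to show the accessor added by this file.
        ConvertSnapshotAnswer answer = new ConvertSnapshotAnswer(cmd.getSnapshotObjectTO());
        System.out.println("executeInSequence=" + cmd.executeInSequence()
                + ", converted TO present=" + (answer.getSnapshotObjectTO() != null));
    }
}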
@@ -24,11 +24,13 @@ public class GetVmIpAddressCommand extends Command {
String vmName;
String vmNetworkCidr;
boolean windows = false;
String macAddress;
public GetVmIpAddressCommand(String vmName, String vmNetworkCidr, boolean windows) {
public GetVmIpAddressCommand(String vmName, String vmNetworkCidr, boolean windows, String macAddress) {
this.vmName = vmName;
this.windows = windows;
this.vmNetworkCidr = vmNetworkCidr;
this.macAddress = macAddress;
}
@Override
@@ -47,4 +49,8 @@ public class GetVmIpAddressCommand extends Command {
public String getVmNetworkCidr() {
return vmNetworkCidr;
}
public String getMacAddress() {
return macAddress;
}
}
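Illustrative usage sketch (not part of this commit): the constructor now also takes the NIC's MAC address alongside the network CIDR, so callers supply both when asking the agent for a guest IP. The literal values below are placeholders.

import com.cloud.agent.api.GetVmIpAddressCommand;

public class GetVmIpAddressUsageSketch {
    public static void main(String[] args) {
        GetVmIpAddressCommand cmd = new GetVmIpAddressCommand(
                "i-2-10-VM",           // vmName (placeholder)
                "10.1.1.0/24",         // vmNetworkCidr (placeholder)
                false,                 // windows guest?
                "02:00:4c:5f:00:01");  // macAddress, the newly added argument
        System.out.println(cmd.getVmNetworkCidr() + " / " + cmd.getMacAddress());
    }
}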
@@ -46,6 +46,10 @@ public class ModifyStoragePoolAnswer extends Answer {
templateInfo = tInfo;
}
public ModifyStoragePoolAnswer(ModifyStoragePoolCommand cmd, boolean success, String details) {
super(cmd, success, details);
}
public void setPoolInfo(StoragePoolInfo poolInfo) {
this.poolInfo = poolInfo;
}
@@ -0,0 +1,49 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.agent.api;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import java.util.List;
public class RecreateCheckpointsCommand extends Command {
private List<VolumeObjectTO> volumes;
private String vmName;
public RecreateCheckpointsCommand(List<VolumeObjectTO> volumes, String vmName) {
this.volumes = volumes;
this.vmName = vmName;
}
public List<VolumeObjectTO> getDisks() {
return volumes;
}
public String getVmName() {
return vmName;
}
@Override
public boolean executeInSequence() {
return true;
}
}
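Illustrative usage sketch (not part of this commit): the command carries, per volume, the checkpoint paths to recreate on the destination host after a KVM migration, which is how VirtualMachineManagerImpl uses it later in this commit. The checkpoint path and VM name below are placeholders.

import java.util.List;

import com.cloud.agent.api.RecreateCheckpointsCommand;
import org.apache.cloudstack.storage.to.VolumeObjectTO;

public class RecreateCheckpointsUsageSketch {
    public static void main(String[] args) {
        VolumeObjectTO volume = new VolumeObjectTO();
        volume.setCheckpointPaths(List.of("/path/to/checkpoint-1.xml")); // placeholder checkpoint path

        RecreateCheckpointsCommand cmd = new RecreateCheckpointsCommand(List.of(volume), "i-2-10-VM");
        System.out.println(cmd.getVmName() + " -> " + cmd.getDisks().size() + " disk(s) with checkpoints");
    }
}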
@@ -0,0 +1,46 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.agent.api;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
public class RemoveBitmapCommand extends Command {
private SnapshotObjectTO snapshotObjectTO;
private boolean isVmRunning;
public RemoveBitmapCommand(SnapshotObjectTO snapshotObjectTO, boolean isVmRunning) {
this.snapshotObjectTO = snapshotObjectTO;
this.isVmRunning = isVmRunning;
}
@Override
public boolean executeInSequence() {
return true;
}
public SnapshotObjectTO getSnapshotObjectTO() {
return snapshotObjectTO;
}
public boolean isVmRunning() {
return isVmRunning;
}
}
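Illustrative usage sketch (not part of this commit): the command names the snapshot whose dirty bitmap should be removed and tells the agent whether the owning VM is currently running, presumably because that changes how the removal is carried out on the KVM side. The TO and flag below are placeholders.

import com.cloud.agent.api.RemoveBitmapCommand;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;

public class RemoveBitmapUsageSketch {
    public static void main(String[] args) {
        SnapshotObjectTO snapshotTO = new SnapshotObjectTO(); // would be built from the snapshot being removed
        RemoveBitmapCommand cmd = new RemoveBitmapCommand(snapshotTO, true /* VM currently running */);
        System.out.println("vmRunning=" + cmd.isVmRunning() + ", executeInSequence=" + cmd.executeInSequence());
    }
}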
@@ -19,18 +19,22 @@
package com.cloud.agent.api;
import java.util.Map;
import com.cloud.storage.Storage.StoragePoolType;
public class UnprepareStorageClientCommand extends Command {
private StoragePoolType poolType;
private String poolUuid;
private Map<String, String> details;
public UnprepareStorageClientCommand() {
}
public UnprepareStorageClientCommand(StoragePoolType poolType, String poolUuid) {
public UnprepareStorageClientCommand(StoragePoolType poolType, String poolUuid, Map<String, String> details) {
this.poolType = poolType;
this.poolUuid = poolUuid;
this.details = details;
}
@Override
@@ -45,4 +49,8 @@ public class UnprepareStorageClientCommand extends Command {
public String getPoolUuid() {
return poolUuid;
}
public Map<String, String> getDetails() {
return details;
}
}
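Illustrative usage sketch (not part of this commit): the constructor now also accepts a details map so driver-specific settings can travel with the unprepare request. The pool type, UUID, and detail key below are placeholders.

import java.util.Map;

import com.cloud.agent.api.UnprepareStorageClientCommand;
import com.cloud.storage.Storage.StoragePoolType;

public class UnprepareStorageClientUsageSketch {
    public static void main(String[] args) {
        Map<String, String> details = Map.of("examplePoolDetail", "exampleValue"); // placeholder driver detail
        UnprepareStorageClientCommand cmd = new UnprepareStorageClientCommand(
                StoragePoolType.NetworkFilesystem, "pool-uuid-placeholder", details);
        System.out.println(cmd.getPoolUuid() + " " + cmd.getDetails());
    }
}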
@@ -142,7 +142,7 @@ public class StorageSubsystemCommandHandlerBase implements StorageSubsystemComma
}
return new CreateObjectAnswer("not supported type");
} catch (Exception e) {
logger.debug("Failed to create object: " + data.getObjectType() + ": " + e.toString());
logger.error("Failed to create object [{}] due to [{}].", data.getObjectType(), e.getMessage(), e);
return new CreateObjectAnswer(e.toString());
}
}
@@ -27,6 +27,7 @@ import java.io.InputStream;
import java.io.RandomAccessFile;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
@@ -80,6 +81,18 @@ public class HttpTemplateDownloader extends ManagedContextRunnable implements Te
private ResourceType resourceType = ResourceType.TEMPLATE;
private final HttpMethodRetryHandler myretryhandler;
private boolean followRedirects = false;
private boolean isChunkedTransfer;
protected static final List<String> CUSTOM_HEADERS_FOR_CHUNKED_TRANSFER_SIZE = Arrays.asList(
"x-goog-stored-content-length",
"x-goog-meta-size",
"x-amz-meta-size",
"x-amz-meta-content-length",
"x-object-meta-size",
"x-original-content-length",
"x-oss-meta-content-length",
"x-file-size");
private static final long MIN_FORMAT_VERIFICATION_SIZE = 1024 * 1024;
public HttpTemplateDownloader(StorageLayer storageLayer, String downloadUrl, String toDir, DownloadCompleteCallback callback, long maxTemplateSizeInBytes,
String user, String password, Proxy proxy, ResourceType resourceType) {
@@ -205,13 +218,11 @@ public class HttpTemplateDownloader extends ManagedContextRunnable implements Te
RandomAccessFile out = new RandomAccessFile(file, "rw");
) {
out.seek(localFileSize);
logger.info("Starting download from " + downloadUrl + " to " + toFile + " remoteSize=" + toHumanReadableSize(remoteSize) + " , max size=" + toHumanReadableSize(maxTemplateSizeInBytes));
if (copyBytes(file, in, out)) return 0;
logger.info("Starting download from {} to {} remoteSize={} , max size={}",downloadUrl, toFile,
toHumanReadableSize(remoteSize), toHumanReadableSize(maxTemplateSizeInBytes));
boolean eof = copyBytes(file, in, out);
Date finish = new Date();
checkDowloadCompletion();
checkDownloadCompletion(eof);
downloadTime += finish.getTime() - start.getTime();
} finally { /* in.close() and out.close() */ }
return totalBytes;
@@ -237,28 +248,32 @@ public class HttpTemplateDownloader extends ManagedContextRunnable implements Te
}
private boolean copyBytes(File file, InputStream in, RandomAccessFile out) throws IOException {
int bytes;
byte[] block = new byte[CHUNK_SIZE];
byte[] buffer = new byte[CHUNK_SIZE];
long offset = 0;
boolean done = false;
VerifyFormat verifyFormat = new VerifyFormat(file);
status = Status.IN_PROGRESS;
while (!done && status != Status.ABORTED && offset <= remoteSize) {
if ((bytes = in.read(block, 0, CHUNK_SIZE)) > -1) {
offset = writeBlock(bytes, out, block, offset);
if (!ResourceType.SNAPSHOT.equals(resourceType) &&
!verifyFormat.isVerifiedFormat() &&
(offset >= 1048576 || offset >= remoteSize)) { //let's check format after we get 1MB or full file
verifyFormat.invoke();
}
} else {
done = true;
while (status != Status.ABORTED) {
int bytesRead = in.read(buffer, 0, CHUNK_SIZE);
if (bytesRead == -1) {
logger.debug("Reached EOF on input stream");
break;
}
offset = writeBlock(bytesRead, out, buffer, offset);
if (!ResourceType.SNAPSHOT.equals(resourceType)
&& !verifyFormat.isVerifiedFormat()
&& (offset >= MIN_FORMAT_VERIFICATION_SIZE || offset >= remoteSize)) {
verifyFormat.invoke();
}
if (offset >= remoteSize) {
logger.debug("Reached expected remote size limit: {} bytes", remoteSize);
break;
}
}
out.getFD().sync();
return false;
return !Status.ABORTED.equals(status);
}
private long writeBlock(int bytes, RandomAccessFile out, byte[] block, long offset) throws IOException {
out.write(block, 0, bytes);
offset += bytes;
@@ -267,11 +282,13 @@ public class HttpTemplateDownloader extends ManagedContextRunnable implements Te
return offset;
}
private void checkDowloadCompletion() {
private void checkDownloadCompletion(boolean eof) {
String downloaded = "(incomplete download)";
if (totalBytes >= remoteSize) {
if (eof && ((totalBytes >= remoteSize) || (isChunkedTransfer && remoteSize == maxTemplateSizeInBytes))) {
status = Status.DOWNLOAD_FINISHED;
downloaded = "(download complete remote=" + toHumanReadableSize(remoteSize) + " bytes)";
downloaded = "(download complete remote=" +
(remoteSize == maxTemplateSizeInBytes ? toHumanReadableSize(remoteSize) : "unknown") +
" bytes)";
}
errorString = "Downloaded " + toHumanReadableSize(totalBytes) + " bytes " + downloaded;
}
@@ -293,18 +310,42 @@ public class HttpTemplateDownloader extends ManagedContextRunnable implements Te
}
}
protected long getRemoteSizeForChunkedTransfer() {
for (String headerKey : CUSTOM_HEADERS_FOR_CHUNKED_TRANSFER_SIZE) {
Header header = request.getResponseHeader(headerKey);
if (header == null) {
continue;
}
try {
return Long.parseLong(header.getValue());
} catch (NumberFormatException ignored) {}
}
Header contentRangeHeader = request.getResponseHeader("Content-Range");
if (contentRangeHeader != null) {
String contentRange = contentRangeHeader.getValue();
if (contentRange != null && contentRange.contains("/")) {
String totalSize = contentRange.substring(contentRange.indexOf('/') + 1).trim();
return Long.parseLong(totalSize);
}
}
return 0;
}
private boolean tryAndGetRemoteSize() {
Header contentLengthHeader = request.getResponseHeader("content-length");
boolean chunked = false;
isChunkedTransfer = false;
long reportedRemoteSize = 0;
if (contentLengthHeader == null) {
Header chunkedHeader = request.getResponseHeader("Transfer-Encoding");
if (chunkedHeader == null || !"chunked".equalsIgnoreCase(chunkedHeader.getValue())) {
if (chunkedHeader != null && "chunked".equalsIgnoreCase(chunkedHeader.getValue())) {
isChunkedTransfer = true;
reportedRemoteSize = getRemoteSizeForChunkedTransfer();
logger.debug("{} is using chunked transfer encoding, possible remote size: {}", downloadUrl,
reportedRemoteSize);
} else {
status = Status.UNRECOVERABLE_ERROR;
errorString = " Failed to receive length of download ";
return false;
} else if ("chunked".equalsIgnoreCase(chunkedHeader.getValue())) {
chunked = true;
}
} else {
reportedRemoteSize = Long.parseLong(contentLengthHeader.getValue());
@@ -316,9 +357,11 @@ public class HttpTemplateDownloader extends ManagedContextRunnable implements Te
return false;
}
}
if (remoteSize == 0) {
remoteSize = reportedRemoteSize;
if (remoteSize != 0) {
logger.debug("Remote size for {} found to be {}", downloadUrl, toHumanReadableSize(remoteSize));
}
}
return true;
}
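Standalone illustration (not part of this commit) of the Content-Range fallback that getRemoteSizeForChunkedTransfer uses when none of the vendor-specific size headers are present: the total size is the value after the '/' in the header. The header value below is a placeholder.

public class ContentRangeParsingSketch {
    public static void main(String[] args) {
        String contentRange = "bytes 0-1023/5242880"; // placeholder Content-Range value
        // Same parsing as in the hunk above: everything after '/' is the total object size.
        long totalSize = Long.parseLong(contentRange.substring(contentRange.indexOf('/') + 1).trim());
        System.out.println(totalSize); // prints 5242880
    }
}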
@@ -24,7 +24,7 @@ public final class TemplateConstants {
public static final String DEFAULT_SNAPSHOT_ROOT_DIR = "snapshots";
public static final String DEFAULT_VOLUME_ROOT_DIR = "volumes";
public static final String DEFAULT_TMPLT_FIRST_LEVEL_DIR = "tmpl/";
public static final String DEFAULT_CHECKPOINT_ROOT_DIR = "checkpoints";
public static final String DEFAULT_SYSTEM_VM_TEMPLATE_PATH = "template/tmpl/1/";
public static final int DEFAULT_TMPLT_COPY_PORT = 80;
@@ -35,12 +35,16 @@ public class SnapshotObjectTO extends DownloadableObjectTO implements DataTO {
private VolumeObjectTO volume;
private String parentSnapshotPath;
private DataStoreTO dataStore;
private DataStoreTO imageStore;
private boolean kvmIncrementalSnapshot = false;
private String vmName;
private String name;
private HypervisorType hypervisorType;
private long id;
private boolean quiescevm;
private String[] parents;
private DataStoreTO parentStore;
private String checkpointPath;
private Long physicalSize = (long) 0;
private long accountId;
@@ -49,6 +53,11 @@
}
@Override
public DataObjectType getObjectType() {
return DataObjectType.SNAPSHOT;
}
public SnapshotObjectTO(SnapshotInfo snapshot) {
this.path = snapshot.getPath();
this.setId(snapshot.getId());
@@ -59,27 +68,28 @@
this.setVmName(vol.getAttachedVmName());
}
SnapshotInfo parentSnapshot = snapshot.getParent();
ArrayList<String> parentsArry = new ArrayList<String>();
if (parentSnapshot != null) {
this.parentSnapshotPath = parentSnapshot.getPath();
while(parentSnapshot != null) {
parentsArry.add(parentSnapshot.getPath());
parentSnapshot = parentSnapshot.getParent();
}
parents = parentsArry.toArray(new String[parentsArry.size()]);
ArrayUtils.reverse(parents);
}
this.dataStore = snapshot.getDataStore().getTO();
this.setName(snapshot.getName());
this.hypervisorType = snapshot.getHypervisorType();
this.quiescevm = false;
}
@Override
public DataObjectType getObjectType() {
return DataObjectType.SNAPSHOT;
this.checkpointPath = snapshot.getCheckpointPath();
this.kvmIncrementalSnapshot = snapshot.isKvmIncrementalSnapshot();
SnapshotInfo parentSnapshot = snapshot.getParent();
if (parentSnapshot == null || (HypervisorType.KVM.equals(snapshot.getHypervisorType()) && !parentSnapshot.isKvmIncrementalSnapshot())) {
return;
}
ArrayList<String> parentsArray = new ArrayList<>();
this.parentSnapshotPath = parentSnapshot.getPath();
while (parentSnapshot != null) {
parentsArray.add(parentSnapshot.getPath());
parentSnapshot = parentSnapshot.getParent();
}
parents = parentsArray.toArray(new String[parentsArray.size()]);
ArrayUtils.reverse(parents);
}
@Override
@@ -91,6 +101,30 @@
this.dataStore = store;
}
public DataStoreTO getImageStore() {
return imageStore;
}
public void setImageStore(DataStoreTO imageStore) {
this.imageStore = imageStore;
}
public boolean isKvmIncrementalSnapshot() {
return kvmIncrementalSnapshot;
}
public void setKvmIncrementalSnapshot(boolean kvmIncrementalSnapshot) {
this.kvmIncrementalSnapshot = kvmIncrementalSnapshot;
}
public String getCheckpointPath() {
return checkpointPath;
}
public void setCheckpointPath(String checkpointPath) {
this.checkpointPath = checkpointPath;
}
@Override
public String getPath() {
return this.path;
@@ -178,6 +212,14 @@
this.accountId = accountId;
}
public DataStoreTO getParentStore() {
return parentStore;
}
public void setParentStore(DataStoreTO parentStore) {
this.parentStore = parentStore;
}
@Override
public String toString() {
return new StringBuilder("SnapshotTO[datastore=").append(dataStore).append("|volume=").append(volume).append("|path").append(path).append("]").toString();
@@ -33,6 +33,8 @@ import com.cloud.storage.Volume;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
import java.util.Arrays;
import java.util.List;
import java.util.Set;
public class VolumeObjectTO extends DownloadableObjectTO implements DataTO {
private String uuid;
@@ -76,6 +78,8 @@
@LogLevel(LogLevel.Log4jLevel.Off)
private byte[] passphrase;
private String encryptFormat;
private List<String> checkpointPaths;
private Set<String> checkpointImageStoreUrls;
public VolumeObjectTO() {
@@ -122,6 +126,8 @@
this.passphrase = volume.getPassphrase();
this.encryptFormat = volume.getEncryptFormat();
this.followRedirects = volume.isFollowRedirects();
this.checkpointPaths = volume.getCheckpointPaths();
this.checkpointImageStoreUrls = volume.getCheckpointImageStoreUrls();
}
public String getUuid() {
@@ -397,4 +403,21 @@
public boolean requiresEncryption() {
return passphrase != null && passphrase.length > 0;
}
public List<String> getCheckpointPaths() {
return checkpointPaths;
}
public void setCheckpointPaths(List<String> checkpointPaths) {
this.checkpointPaths = checkpointPaths;
}
public Set<String> getCheckpointImageStoreUrls() {
return checkpointImageStoreUrls;
}
public void setCheckpointImageStoreUrls(Set<String> checkpointImageStoreUrls) {
this.checkpointImageStoreUrls = checkpointImageStoreUrls;
}
}
@@ -23,6 +23,7 @@ import java.util.Map;
import java.util.Set;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.utils.Pair;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
@@ -83,6 +84,17 @@ public interface VolumeOrchestrationService {
"The maximum size for a volume (in GiB).",
true);
ConfigKey<String> VolumeAllocationAlgorithm = new ConfigKey<>(
String.class,
"volume.allocation.algorithm",
"Advanced",
"random",
"Order in which storage pool within a cluster will be considered for volume allocation. The value can be 'random', 'firstfit', 'userdispersing', 'userconcentratedpod_random', 'userconcentratedpod_firstfit', or 'firstfitleastconsumed'.",
true,
ConfigKey.Scope.Global, null, null, null, null, null,
ConfigKey.Kind.Select,
"random,firstfit,userdispersing,userconcentratedpod_random,userconcentratedpod_firstfit,firstfitleastconsumed");
VolumeInfo moveVolume(VolumeInfo volume, long destPoolDcId, Long destPoolPodId, Long destPoolClusterId, HypervisorType dataDiskHyperType)
throws ConcurrentOperationException, StorageUnavailableException;
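Illustrative sketch (not part of this commit): the new volume.allocation.algorithm setting is an ordinary global ConfigKey, so its key and default can be inspected like any other; the valid values are the Select options declared above. The standalone main method is purely for illustration.

import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService;

public class VolumeAllocationAlgorithmSketch {
    public static void main(String[] args) {
        // Outside a running management server this only reports the declared default ("random");
        // at runtime the value can be changed globally like any other Select-kind setting.
        System.out.println(VolumeOrchestrationService.VolumeAllocationAlgorithm.key() + " = "
                + VolumeOrchestrationService.VolumeAllocationAlgorithm.defaultValue());
    }
}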
@@ -179,4 +191,9 @@
* Unmanage VM volumes
*/
void unmanageVolumes(long vmId);
/**
* Retrieves the volume's checkpoints paths to be used in the KVM processor. If there are no checkpoints, it will return an empty list.
*/
Pair<List<String>, Set<String>> getVolumeCheckpointPathsAndImageStoreUrls(long volumeId, HypervisorType hypervisorType);
}
@@ -45,4 +45,8 @@ public interface DataStoreDriver {
boolean canCopy(DataObject srcData, DataObject destData);
void resize(DataObject data, AsyncCompletionCallback<CreateCmdResult> callback);
default boolean canDisplayDetails() {
return true;
}
}
@@ -18,6 +18,8 @@
*/
package org.apache.cloudstack.engine.subsystem.api.storage;
import com.cloud.hypervisor.Hypervisor;
import java.util.List;
public interface EndPointSelector {
@@ -39,6 +41,8 @@ public interface EndPointSelector {
EndPoint select(DataObject object, StorageAction action, boolean encryptionSupportRequired);
EndPoint selectRandom(long zoneId, Hypervisor.HypervisorType hypervisorType);
List<EndPoint> selectAll(DataStore store);
List<EndPoint> findAllEndpointsForScope(DataStore store);
@@ -32,7 +32,8 @@ public interface ObjectInDataStoreStateMachine extends StateObject<ObjectInDataS
Migrated("The object has been migrated"),
Destroying("Template is destroying"),
Destroyed("Template is destroyed"),
Failed("Failed to download template");
Failed("Failed to download template"),
Hidden("The object is hidden from the user");
String _description;
private State(String description) {
@@ -145,6 +145,14 @@ public interface PrimaryDataStoreDriver extends DataStoreDriver {
return false;
}
/**
* intended for managed storage
* returns true if the host can be disconnected from storage pool
*/
default boolean canDisconnectHostFromStoragePool(Host host, StoragePool pool) {
return true;
}
/**
* Used by storage pools which want to keep VMs' information
* @return true if additional VM info is needed (intended for storage pools).
@@ -32,6 +32,10 @@ public interface SnapshotInfo extends DataObject, Snapshot {
String getPath();
DataStore getImageStore();
void setImageStore(DataStore imageStore);
SnapshotInfo getChild();
List<SnapshotInfo> getChildren();
@@ -56,6 +60,14 @@
void markBackedUp() throws CloudRuntimeException;
String getCheckpointPath();
void setCheckpointPath(String checkpointPath);
void setKvmIncrementalSnapshot(boolean isKvmIncrementalSnapshot);
boolean isKvmIncrementalSnapshot();
Snapshot getSnapshotVO();
long getAccountId();
@@ -25,8 +25,12 @@ import com.cloud.storage.Snapshot.Event;
public interface SnapshotService {
SnapshotResult takeSnapshot(SnapshotInfo snapshot);
DataStore findSnapshotImageStore(SnapshotInfo snapshot);
SnapshotInfo backupSnapshot(SnapshotInfo snapshot);
SnapshotInfo convertSnapshot(SnapshotInfo snapshotInfo);
boolean deleteSnapshot(SnapshotInfo snapshot);
boolean revertSnapshot(SnapshotInfo snapshot);
@@ -22,6 +22,8 @@ public enum StorageAction {
TAKESNAPSHOT,
BACKUPSNAPSHOT,
DELETESNAPSHOT,
CONVERTSNAPSHOT,
REMOVEBITMAP,
MIGRATEVOLUME,
DELETEVOLUME
}
@@ -26,6 +26,9 @@ import com.cloud.storage.Storage;
import com.cloud.storage.Volume;
import com.cloud.vm.VirtualMachine;
import java.util.List;
import java.util.Set;
public interface VolumeInfo extends DownloadableDataInfo, Volume {
boolean isAttachedVM();
@@ -96,4 +99,8 @@
public byte[] getPassphrase();
Volume getVolume();
List<String> getCheckpointPaths();
Set<String> getCheckpointImageStoreUrls();
}
@@ -314,6 +314,8 @@ public interface StorageManager extends StorageService {
boolean canHostPrepareStoragePoolAccess(Host host, StoragePool pool);
boolean canDisconnectHostFromStoragePool(Host host, StoragePool pool);
Host getHost(long hostId);
Host updateSecondaryStorage(long secStorageId, String newUrl);
@@ -247,6 +247,11 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
protected final ConfigKey<Boolean> CheckTxnBeforeSending = new ConfigKey<>("Developer", Boolean.class, "check.txn.before.sending.agent.commands", "false",
"This parameter allows developers to enable a check to see if a transaction wraps commands that are sent to the resource. This is not to be enabled on production systems.", true);
public static final List<Host.Type> HOST_DOWN_ALERT_UNSUPPORTED_HOST_TYPES = Arrays.asList(
Host.Type.SecondaryStorage,
Host.Type.ConsoleProxy
);
@Override
public boolean configure(final String name, final Map<String, Object> params) throws ConfigurationException {
@@ -1093,9 +1098,11 @@
if (determinedState == Status.Down) {
final String message = String.format("Host %s is down. Starting HA on the VMs", host);
logger.error(message);
if (host.getType() != Host.Type.SecondaryStorage && host.getType() != Host.Type.ConsoleProxy) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(),
host.getPodId(), String.format("Host down, %s", host), message);
if (Status.Down.equals(host.getStatus())) {
logger.debug(String.format("Skipping sending alert for %s as it already in %s state",
host, host.getStatus()));
} else if (!HOST_DOWN_ALERT_UNSUPPORTED_HOST_TYPES.contains(host.getType())) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "Host down, " + host.getId(), message);
}
event = Status.Event.HostDown;
} else if (determinedState == Status.Up) {
@@ -63,6 +63,7 @@ import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationSe
import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
import org.apache.cloudstack.framework.ca.Certificate;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.Configurable;
@@ -83,6 +84,7 @@
import org.apache.cloudstack.reservation.dao.ReservationDao;
import org.apache.cloudstack.resource.ResourceCleanupService;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.cloudstack.utils.cache.SingleCache;
@@ -120,6 +122,7 @@
import com.cloud.agent.api.PrepareForMigrationCommand;
import com.cloud.agent.api.RebootAnswer;
import com.cloud.agent.api.RebootCommand;
import com.cloud.agent.api.RecreateCheckpointsCommand;
import com.cloud.agent.api.ReplugNicAnswer;
import com.cloud.agent.api.ReplugNicCommand;
import com.cloud.agent.api.RestoreVMSnapshotAnswer;
@@ -241,6 +244,7 @@
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.storage.dao.VMTemplateZoneDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.snapshot.SnapshotManager;
import com.cloud.template.VirtualMachineTemplate;
import com.cloud.user.Account;
import com.cloud.user.ResourceLimitService;
@@ -412,6 +416,16 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
private SingleCache<List<Long>> vmIdsInProgressCache;
@Inject
private SnapshotDataStoreDao snapshotDataStoreDao;
@Inject
private SnapshotManager snapshotManager;
@Inject
private VolumeDataFactory volumeDataFactory;
VmWorkJobHandlerProxy _jobHandlerProxy = new VmWorkJobHandlerProxy(this);
Map<VirtualMachine.Type, VirtualMachineGuru> _vmGurus = new HashMap<>();
@@ -2915,6 +2929,7 @@
_networkMgr.commitNicForMigration(vmSrc, profile);
volumeMgr.release(vm.getId(), srcHostId);
_networkMgr.setHypervisorHostname(profile, dest, true);
recreateCheckpointsKvmOnVmAfterMigration(vm, dstHostId);
updateVmPod(vm, dstHostId);
}
@@ -3364,6 +3379,7 @@
_networkMgr.commitNicForMigration(vmSrc, profile);
volumeMgr.release(vm.getId(), srcHostId);
_networkMgr.setHypervisorHostname(profile, destination, true);
endSnapshotChainForVolumes(volumeToPoolMap, vm.getHypervisorType());
}
work.setStep(Step.Done);
@@ -3371,6 +3387,68 @@
}
}
protected void endSnapshotChainForVolumes(Map<Volume, StoragePool> volumeToPoolMap, HypervisorType hypervisorType) {
Set<Volume> volumes = volumeToPoolMap.keySet();
volumes.forEach(volume -> {
Volume volumeOnDestination = _volsDao.findByPoolIdName(volumeToPoolMap.get(volume).getId(), volume.getName());
snapshotManager.endSnapshotChainForVolume(volumeOnDestination.getId(), hypervisorType);
});
}
protected void recreateCheckpointsKvmOnVmAfterMigration(VMInstanceVO vm, long hostId) {
if (!HypervisorType.KVM.equals(vm.getHypervisorType())) {
logger.debug("Will not recreate checkpoint on VM as it is not running on KVM, thus it is not needed.");
return;
}
List<VolumeObjectTO> volumes = getVmVolumesWithCheckpointsToRecreate(vm);
if (volumes.isEmpty()) {
logger.debug("Will not recreate checkpoints on VM as its volumes do not have any checkpoints associated with them.");
return;
}
RecreateCheckpointsCommand recreateCheckpointsCommand = new RecreateCheckpointsCommand(volumes, vm.getInstanceName());
Answer answer = null;
try {
logger.debug(String.format("Recreating the volume checkpoints with URLs [%s] of volumes [%s] on %s as part of the migration process.", volumes.stream().map(VolumeObjectTO::getCheckpointPaths).collect(Collectors.toList()), volumes, vm));
answer = _agentMgr.send(hostId, recreateCheckpointsCommand);
} catch (AgentUnavailableException | OperationTimedoutException e) {
logger.error(String.format("Exception while sending command to host [%s] to recreate checkpoints with URLs [%s] of volumes [%s] on %s due to: [%s].", hostId, volumes.stream().map(VolumeObjectTO::getCheckpointPaths).collect(Collectors.toList()), volumes, vm, e.getMessage()), e);
throw new CloudRuntimeException(e);
} finally {
if (answer != null && answer.getResult()) {
logger.debug(String.format("Successfully recreated checkpoints on VM [%s].", vm));
return;
}
logger.debug(String.format("Migration on VM [%s] was successful; however, we weren't able to recreate the checkpoints on it. Marking the snapshot chain as ended." +
" Next snapshot will create a new snapshot chain.", vm));
volumes.forEach(volumeObjectTO -> snapshotManager.endSnapshotChainForVolume(volumeObjectTO.getId(), HypervisorType.KVM));
}
}
protected List<VolumeObjectTO> getVmVolumesWithCheckpointsToRecreate(VMInstanceVO vm) {
List<VolumeVO> vmVolumes = _volsDao.findByInstance(vm.getId());
List<VolumeObjectTO> volumes = new ArrayList<>();
for (VolumeVO volume : vmVolumes) {
Pair<List<String>, Set<String>> volumeCheckpointPathsAndImageStoreUrls = volumeMgr.getVolumeCheckpointPathsAndImageStoreUrls(volume.getId(), HypervisorType.KVM);
if (volumeCheckpointPathsAndImageStoreUrls.first().isEmpty()) {
continue;
}
VolumeObjectTO volumeTo = new VolumeObjectTO();
volumeTo.setCheckpointPaths(volumeCheckpointPathsAndImageStoreUrls.first());
volumeTo.setCheckpointImageStoreUrls(volumeCheckpointPathsAndImageStoreUrls.second());
volumeTo.setPath(volume.getPath());
volumes.add(volumeTo);
}
return volumes;
}
@Override
public VirtualMachineTO toVmTO(final VirtualMachineProfile profile) {
final HypervisorGuru hvGuru = _hvGuruMgr.getGuru(profile.getVirtualMachine().getHypervisorType());
@@ -595,7 +595,7 @@
if (_networkOfferingDao.findByUniqueName(NetworkOffering.DefaultIsolatedNetworkOfferingForVpcNetworks) == null) {
offering = _configMgr.createNetworkOffering(NetworkOffering.DefaultIsolatedNetworkOfferingForVpcNetworks,
"Offering for Isolated VPC networks with Source Nat service enabled", TrafficType.Guest, null, false, Availability.Optional, null,
defaultVPCOffProviders, true, Network.GuestType.Isolated, false, null, false, null, false, false, null, false, null, true, true, false, false, null, null, null,true, null, null, false);
defaultVPCOffProviders, true, Network.GuestType.Isolated, false, null, true, null, false, false, null, false, null, true, true, false, false, null, null, null,true, null, null, false);
}
//#6 - default vpc offering with no LB service
@@ -38,10 +38,12 @@ import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import com.cloud.deploy.DeploymentClusterPlanner;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.storage.DiskOfferingVO;
import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.storage.snapshot.SnapshotManager;
import com.cloud.user.AccountManager;
import org.apache.cloudstack.api.ApiCommandResourceType;
import org.apache.cloudstack.api.ApiConstants.IoDriverPolicy;
@@ -73,6 +75,7 @@
import org.apache.cloudstack.framework.config.ConfigDepot;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.Configurable;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.framework.jobs.AsyncJobManager;
import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO;
import org.apache.cloudstack.resourcedetail.DiskOfferingDetailVO;
@@ -81,8 +84,10 @@
import org.apache.cloudstack.secret.dao.PassphraseDao;
import org.apache.cloudstack.snapshot.SnapshotHelper;
import org.apache.cloudstack.storage.command.CommandResult;
import org.apache.cloudstack.storage.datastore.db.ImageStoreDao;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
@@ -198,6 +203,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
@Inject
protected PrimaryDataStoreDao _storagePoolDao = null;
@Inject
protected ImageStoreDao imageStoreDao;
@Inject
protected TemplateDataStoreDao _vmTemplateStoreDao = null;
@Inject
protected VolumeDao _volumeDao;
@@ -257,6 +264,10 @@
StoragePoolHostDao storagePoolHostDao;
@Inject
DiskOfferingDao diskOfferingDao;
@Inject
ConfigDepot configDepot;
@Inject
ConfigurationDao configurationDao;
@Inject
protected SnapshotHelper snapshotHelper;
@@ -574,6 +585,11 @@
throw e;
}
boolean kvmIncrementalSnapshot = SnapshotManager.kvmIncrementalSnapshot.valueIn(_hostDao.findClusterIdByVolumeInfo(snapInfo.getBaseVolume()));
if (kvmIncrementalSnapshot && DataStoreRole.Image.equals(dataStoreRole)) {
snapInfo = snapshotHelper.convertSnapshotIfNeeded(snapInfo);
}
// don't try to perform a sync if the DataStoreRole of the snapshot is equal to DataStoreRole.Primary
if (!DataStoreRole.Primary.equals(dataStoreRole) || kvmSnapshotOnlyInPrimaryStorage) {
try {
@@ -1974,10 +1990,29 @@
_vmCloneSettingDao.persist(vmCloneSettingVO);
}
}
}
}
@Override
public Pair<List<String>, Set<String>> getVolumeCheckpointPathsAndImageStoreUrls(long volumeId, HypervisorType hypervisorType) {
List<String> checkpointPaths = new ArrayList<>();
Set<Long> imageStoreIds = new HashSet<>();
Set<String> imageStoreUrls = new HashSet<>();
if (HypervisorType.KVM.equals(hypervisorType)) {
List<SnapshotDataStoreVO> snapshotDataStoreVos = _snapshotDataStoreDao.listReadyByVolumeIdAndCheckpointPathNotNull(volumeId);
snapshotDataStoreVos.forEach(snapshotDataStoreVO -> {
checkpointPaths.add(snapshotDataStoreVO.getKvmCheckpointPath());
if (DataStoreRole.Image.equals(snapshotDataStoreVO.getRole())) {
imageStoreIds.add(snapshotDataStoreVO.getDataStoreId());
}
});
imageStoreUrls = imageStoreIds.stream().map(id -> imageStoreDao.findById(id).getUrl()).collect(Collectors.toSet());
logger.debug(String.format("Found [%s] snapshots [%s] that have checkpoints for volume with id [%s].", snapshotDataStoreVos.size(), snapshotDataStoreVos, volumeId));
}
return new Pair<>(checkpointPaths, imageStoreUrls);
}
private void handleCheckAndRepairVolume(Volume vol, Long hostId) {
Host host = _hostDao.findById(hostId);
try {
@@ -2018,7 +2053,9 @@
@Override
public ConfigKey<?>[] getConfigKeys() {
return new ConfigKey<?>[] {RecreatableSystemVmEnabled, MaxVolumeSize, StorageHAMigrationEnabled, StorageMigrationEnabled, CustomDiskOfferingMaxSize, CustomDiskOfferingMinSize, VolumeUrlCheck};
return new ConfigKey<?>[] {
RecreatableSystemVmEnabled, MaxVolumeSize, StorageHAMigrationEnabled, StorageMigrationEnabled,
CustomDiskOfferingMaxSize, CustomDiskOfferingMinSize, VolumeUrlCheck, VolumeAllocationAlgorithm};
}
@Override
@@ -2031,6 +2068,18 @@
return true;
}
@Override
public boolean start() {
if (configDepot.isNewConfig(VolumeAllocationAlgorithm)) {
String vmAllocationAlgo = DeploymentClusterPlanner.VmAllocationAlgorithm.value();
if (com.cloud.utils.StringUtils.isNotEmpty(vmAllocationAlgo) && !VolumeAllocationAlgorithm.defaultValue().equalsIgnoreCase(vmAllocationAlgo)) {
logger.debug("Updating value for configuration: {} to {}", VolumeAllocationAlgorithm.key(), vmAllocationAlgo);
configurationDao.update(VolumeAllocationAlgorithm.key(), vmAllocationAlgo);
}
}
return true;
}
private void cleanupVolumeDuringAttachFailure(Long volumeId, Long vmId) {
VolumeVO volume = _volsDao.findById(volumeId);
if (volume == null) {
@@ -46,6 +46,7 @@ import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.impl.ConfigDepotImpl;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.commons.collections.MapUtils;
import org.junit.After;
import org.junit.Assert;
@@ -85,7 +86,9 @@
import com.cloud.deploy.DeploymentPlanningManager;
import com.cloud.domain.DomainVO;
import com.cloud.domain.dao.DomainDao;
import com.cloud.exception.AgentUnavailableException;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.exception.OperationTimedoutException;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
@@ -114,6 +117,7 @@
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.storage.dao.VMTemplateZoneDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.snapshot.SnapshotManager;
import com.cloud.template.VirtualMachineTemplate;
import com.cloud.user.Account;
import com.cloud.user.AccountVO;
@@ -153,6 +157,9 @@ public class VirtualMachineManagerImplTest {
@Mock
private ServiceOfferingVO serviceOfferingMock;
@Mock
private SnapshotManager snapshotManagerMock;
@Mock
private DiskOfferingVO diskOfferingMock;
@@ -1304,4 +1311,74 @@
Assert.assertEquals(manufacturer, to.getMetadataManufacturer());
Assert.assertEquals(product, to.getMetadataProductName());
}
@Test
public void recreateCheckpointsKvmOnVmAfterMigrationTestReturnIfNotKvm() {
Mockito.doReturn(HypervisorType.VMware).when(vmInstanceMock).getHypervisorType();
virtualMachineManagerImpl.recreateCheckpointsKvmOnVmAfterMigration(vmInstanceMock, 0);
Mockito.verify(volumeDaoMock, Mockito.never()).findByInstance(Mockito.anyLong());
}
@Test
public void recreateCheckpointsKvmOnVmAfterMigrationTestReturnIfVolumesDoNotHaveCheckpoints() throws OperationTimedoutException, AgentUnavailableException {
Mockito.doReturn(HypervisorType.KVM).when(vmInstanceMock).getHypervisorType();
Mockito.doReturn(new ArrayList<VolumeObjectTO>()).when(virtualMachineManagerImpl).getVmVolumesWithCheckpointsToRecreate(Mockito.any());
virtualMachineManagerImpl.recreateCheckpointsKvmOnVmAfterMigration(vmInstanceMock, 0);
Mockito.verify(agentManagerMock, Mockito.never()).send(Mockito.anyLong(), (Command) any());
}
@Test (expected = CloudRuntimeException.class)
public void recreateCheckpointsKvmOnVmAfterMigrationTestAgentUnavailableThrowsCloudRuntimeExceptionAndEndsSnapshotChains() throws OperationTimedoutException, AgentUnavailableException {
Mockito.doReturn(HypervisorType.KVM).when(vmInstanceMock).getHypervisorType();
Mockito.doReturn(List.of(new VolumeObjectTO())).when(virtualMachineManagerImpl).getVmVolumesWithCheckpointsToRecreate(Mockito.any());
Mockito.doThrow(new AgentUnavailableException(0)).when(agentManagerMock).send(Mockito.anyLong(), (Command) any());
Mockito.doNothing().when(snapshotManagerMock).endSnapshotChainForVolume(Mockito.anyLong(), Mockito.any());
virtualMachineManagerImpl.recreateCheckpointsKvmOnVmAfterMigration(vmInstanceMock, 0);
Mockito.verify(snapshotManagerMock, Mockito.times(1)).endSnapshotChainForVolume(Mockito.anyLong(),any());
}
@Test (expected = CloudRuntimeException.class)
public void recreateCheckpointsKvmOnVmAfterMigrationTestOperationTimedoutExceptionThrowsCloudRuntimeExceptionAndEndsSnapshotChains() throws OperationTimedoutException, AgentUnavailableException {
Mockito.doReturn(HypervisorType.KVM).when(vmInstanceMock).getHypervisorType();
Mockito.doReturn(List.of(new VolumeObjectTO())).when(virtualMachineManagerImpl).getVmVolumesWithCheckpointsToRecreate(Mockito.any());
Mockito.doThrow(new OperationTimedoutException(null, 0, 0, 0, false)).when(agentManagerMock).send(Mockito.anyLong(), (Command) any());
Mockito.doNothing().when(snapshotManagerMock).endSnapshotChainForVolume(Mockito.anyLong(), Mockito.any());
virtualMachineManagerImpl.recreateCheckpointsKvmOnVmAfterMigration(vmInstanceMock, 0);
Mockito.verify(snapshotManagerMock, Mockito.times(1)).endSnapshotChainForVolume(Mockito.anyLong(),any());
}
@Test
public void recreateCheckpointsKvmOnVmAfterMigrationTestRecreationFails() throws OperationTimedoutException, AgentUnavailableException {
Mockito.doReturn(HypervisorType.KVM).when(vmInstanceMock).getHypervisorType();
Mockito.doReturn(List.of(new VolumeObjectTO())).when(virtualMachineManagerImpl).getVmVolumesWithCheckpointsToRecreate(Mockito.any());
Mockito.doReturn(new com.cloud.agent.api.Answer(null, false, null)).when(agentManagerMock).send(Mockito.anyLong(), (Command) any());
Mockito.doNothing().when(snapshotManagerMock).endSnapshotChainForVolume(Mockito.anyLong(), Mockito.any());
virtualMachineManagerImpl.recreateCheckpointsKvmOnVmAfterMigration(vmInstanceMock, 0);
Mockito.verify(snapshotManagerMock, Mockito.times(1)).endSnapshotChainForVolume(Mockito.anyLong(),any());
}
@Test
public void recreateCheckpointsKvmOnVmAfterMigrationTestRecreationSucceeds() throws OperationTimedoutException, AgentUnavailableException {
Mockito.doReturn(HypervisorType.KVM).when(vmInstanceMock).getHypervisorType();
Mockito.doReturn(List.of(new VolumeObjectTO())).when(virtualMachineManagerImpl).getVmVolumesWithCheckpointsToRecreate(Mockito.any());
Mockito.doReturn(new com.cloud.agent.api.Answer(null, true, null)).when(agentManagerMock).send(Mockito.anyLong(), (Command) any());
virtualMachineManagerImpl.recreateCheckpointsKvmOnVmAfterMigration(vmInstanceMock, 0);
Mockito.verify(snapshotManagerMock, Mockito.never()).endSnapshotChainForVolume(Mockito.anyLong(),any());
}
}
@ -18,13 +18,32 @@ package org.apache.cloudstack.engine.orchestration;
|
|||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Date;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.lang.reflect.Field;
|
||||
|
||||
import com.cloud.configuration.Resource;
|
||||
import com.cloud.deploy.DeploymentClusterPlanner;
|
||||
import com.cloud.exception.InvalidParameterValueException;
|
||||
import com.cloud.exception.StorageAccessException;
|
||||
import com.cloud.host.Host;
|
||||
import com.cloud.host.HostVO;
|
||||
import com.cloud.hypervisor.Hypervisor;
|
||||
import com.cloud.offering.DiskOffering;
|
||||
import com.cloud.storage.ScopeType;
|
||||
import com.cloud.storage.DataStoreRole;
|
||||
import com.cloud.storage.Storage;
|
||||
import com.cloud.storage.Volume;
|
||||
import com.cloud.storage.Volume.Type;
|
||||
import com.cloud.storage.VolumeVO;
|
||||
import com.cloud.storage.dao.VolumeDao;
|
||||
import com.cloud.user.Account;
|
||||
import com.cloud.user.ResourceLimitService;
|
||||
import com.cloud.uservm.UserVm;
|
||||
import com.cloud.utils.db.EntityManager;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import com.cloud.utils.Pair;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore;
|
||||
|
|
@@ -32,6 +51,17 @@ import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver
|
|||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
|
||||
import org.apache.cloudstack.storage.datastore.db.ImageStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.ImageStoreVO;
|
||||
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
|
||||
import org.apache.cloudstack.framework.config.ConfigDepot;
|
||||
import org.apache.cloudstack.framework.config.ConfigKey;
|
||||
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
|
||||
import org.apache.cloudstack.secret.PassphraseVO;
|
||||
import org.apache.cloudstack.secret.dao.PassphraseDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||
import org.apache.commons.lang3.ObjectUtils;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
|
|
@@ -45,14 +75,9 @@ import org.mockito.Spy;
|
|||
import org.mockito.junit.MockitoJUnitRunner;
|
||||
import org.mockito.stubbing.Answer;
|
||||
|
||||
import com.cloud.configuration.Resource;
|
||||
import com.cloud.exception.StorageAccessException;
|
||||
import com.cloud.host.Host;
|
||||
import com.cloud.host.HostVO;
|
||||
import com.cloud.storage.VolumeVO;
|
||||
import com.cloud.storage.Volume.Type;
|
||||
import com.cloud.user.ResourceLimitService;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import static org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService.VolumeAllocationAlgorithm;
|
||||
import static org.junit.Assert.assertNotNull;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
@RunWith(MockitoJUnitRunner.class)
|
||||
public class VolumeOrchestratorTest {
|
||||
|
|
@@ -65,6 +90,24 @@ public class VolumeOrchestratorTest {
|
|||
protected VolumeDataFactory volumeDataFactory;
|
||||
@Mock
|
||||
protected VolumeDao volumeDao;
|
||||
@Mock
|
||||
protected PassphraseDao passphraseDao;
|
||||
@Mock
|
||||
protected PrimaryDataStoreDao storagePoolDao;
|
||||
@Mock
|
||||
protected EntityManager entityMgr;
|
||||
@Mock
|
||||
ConfigDepot configDepot;
|
||||
@Mock
|
||||
ConfigurationDao configurationDao;
|
||||
|
||||
|
||||
@Mock
|
||||
private SnapshotDataStoreDao snapshotDataStoreDaoMock;
|
||||
|
||||
@Mock
|
||||
private ImageStoreDao imageStoreDaoMock;
|
||||
|
||||
|
||||
@Spy
|
||||
@InjectMocks
|
||||
|
|
@@ -72,6 +115,9 @@ public class VolumeOrchestratorTest {
|
|||
|
||||
private static final Long DEFAULT_ACCOUNT_PS_RESOURCE_COUNT = 100L;
|
||||
private Long accountPSResourceCount;
|
||||
private static final long MOCK_VM_ID = 202L;
|
||||
private static final long MOCK_POOL_ID = 303L;
|
||||
private static final String MOCK_VM_NAME = "Test-VM";
|
||||
|
||||
@Before
|
||||
public void setUp() throws Exception {
|
||||
|
|
@@ -208,4 +254,390 @@ public class VolumeOrchestratorTest {
|
|||
Mockito.verify(volume, Mockito.times(1)).setChainInfo(chainInfo);
|
||||
Mockito.verify(volume, Mockito.times(1)).setState(Volume.State.Ready);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAllocateDuplicateVolumeVOBasic() {
|
||||
Volume oldVol = Mockito.mock(Volume.class);
|
||||
Mockito.when(oldVol.getVolumeType()).thenReturn(Volume.Type.ROOT);
|
||||
Mockito.when(oldVol.getName()).thenReturn("testVol");
|
||||
Mockito.when(oldVol.getDataCenterId()).thenReturn(1L);
|
||||
Mockito.when(oldVol.getDomainId()).thenReturn(2L);
|
||||
Mockito.when(oldVol.getAccountId()).thenReturn(3L);
|
||||
Mockito.when(oldVol.getDiskOfferingId()).thenReturn(4L);
|
||||
Mockito.when(oldVol.getProvisioningType()).thenReturn(Storage.ProvisioningType.THIN);
|
||||
Mockito.when(oldVol.getSize()).thenReturn(10L);
|
||||
Mockito.when(oldVol.getMinIops()).thenReturn(100L);
|
||||
Mockito.when(oldVol.getMaxIops()).thenReturn(200L);
|
||||
Mockito.when(oldVol.get_iScsiName()).thenReturn("iqn.test");
|
||||
Mockito.when(oldVol.getTemplateId()).thenReturn(5L);
|
||||
Mockito.when(oldVol.getDeviceId()).thenReturn(1L);
|
||||
Mockito.when(oldVol.getInstanceId()).thenReturn(6L);
|
||||
Mockito.when(oldVol.isRecreatable()).thenReturn(false);
|
||||
Mockito.when(oldVol.getFormat()).thenReturn(Storage.ImageFormat.QCOW2);
|
||||
Mockito.when(oldVol.getPassphraseId()).thenReturn(null); // no encryption
|
||||
|
||||
VolumeVO persistedVol = Mockito.mock(VolumeVO.class);
|
||||
Mockito.when(volumeDao.persist(Mockito.any(VolumeVO.class))).thenReturn(persistedVol);
|
||||
|
||||
VolumeVO result = volumeOrchestrator.allocateDuplicateVolumeVO(oldVol, null, null);
|
||||
assertNotNull(result);
|
||||
Mockito.verify(volumeDao, Mockito.times(1)).persist(Mockito.any(VolumeVO.class));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAllocateDuplicateVolumeVOWithEncryption() {
|
||||
Volume oldVol = Mockito.mock(Volume.class);
|
||||
Mockito.when(oldVol.getVolumeType()).thenReturn(Volume.Type.ROOT);
|
||||
Mockito.when(oldVol.getName()).thenReturn("secureVol");
|
||||
Mockito.when(oldVol.getDataCenterId()).thenReturn(1L);
|
||||
Mockito.when(oldVol.getDomainId()).thenReturn(2L);
|
||||
Mockito.when(oldVol.getAccountId()).thenReturn(3L);
|
||||
Mockito.when(oldVol.getDiskOfferingId()).thenReturn(4L);
|
||||
Mockito.when(oldVol.getProvisioningType()).thenReturn(Storage.ProvisioningType.THIN);
|
||||
Mockito.when(oldVol.getSize()).thenReturn(10L);
|
||||
Mockito.when(oldVol.getMinIops()).thenReturn(100L);
|
||||
Mockito.when(oldVol.getMaxIops()).thenReturn(200L);
|
||||
Mockito.when(oldVol.get_iScsiName()).thenReturn("iqn.secure");
|
||||
Mockito.when(oldVol.getTemplateId()).thenReturn(5L);
|
||||
Mockito.when(oldVol.getDeviceId()).thenReturn(2L);
|
||||
Mockito.when(oldVol.getInstanceId()).thenReturn(7L);
|
||||
Mockito.when(oldVol.isRecreatable()).thenReturn(true);
|
||||
Mockito.when(oldVol.getFormat()).thenReturn(Storage.ImageFormat.RAW);
|
||||
Mockito.when(oldVol.getPassphraseId()).thenReturn(42L);
|
||||
|
||||
PassphraseVO passphrase = Mockito.mock(PassphraseVO.class);
|
||||
Mockito.when(passphrase.getId()).thenReturn(999L);
|
||||
Mockito.when(passphraseDao.persist(Mockito.any())).thenReturn(passphrase);
|
||||
|
||||
VolumeVO persistedVol = Mockito.mock(VolumeVO.class);
|
||||
Mockito.when(volumeDao.persist(Mockito.any())).thenReturn(persistedVol);
|
||||
|
||||
VolumeVO result = volumeOrchestrator.allocateDuplicateVolumeVO(oldVol, null, null);
|
||||
assertNotNull(result);
|
||||
Mockito.verify(passphraseDao).persist(Mockito.any(PassphraseVO.class));
|
||||
Mockito.verify(volumeDao).persist(Mockito.any());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAllocateDuplicateVolumeVOWithTemplateOverride() {
|
||||
Volume oldVol = Mockito.mock(Volume.class);
|
||||
Mockito.when(oldVol.getVolumeType()).thenReturn(Volume.Type.ROOT);
|
||||
Mockito.when(oldVol.getName()).thenReturn("tmplVol");
|
||||
Mockito.when(oldVol.getDataCenterId()).thenReturn(1L);
|
||||
Mockito.when(oldVol.getDomainId()).thenReturn(2L);
|
||||
Mockito.when(oldVol.getAccountId()).thenReturn(3L);
|
||||
Mockito.when(oldVol.getDiskOfferingId()).thenReturn(4L);
|
||||
Mockito.when(oldVol.getProvisioningType()).thenReturn(Storage.ProvisioningType.THIN);
|
||||
Mockito.when(oldVol.getSize()).thenReturn(20L);
|
||||
Mockito.when(oldVol.getMinIops()).thenReturn(50L);
|
||||
Mockito.when(oldVol.getMaxIops()).thenReturn(250L);
|
||||
Mockito.when(oldVol.get_iScsiName()).thenReturn("iqn.tmpl");
|
||||
|
||||
VolumeVO persistedVol = Mockito.mock(VolumeVO.class);
|
||||
Mockito.when(volumeDao.persist(Mockito.any())).thenReturn(persistedVol);
|
||||
|
||||
PassphraseVO mockPassPhrase = Mockito.mock(PassphraseVO.class);
|
||||
Mockito.when(passphraseDao.persist(Mockito.any())).thenReturn(mockPassPhrase);
|
||||
|
||||
VolumeVO result = volumeOrchestrator.allocateDuplicateVolumeVO(oldVol, null, 222L);
|
||||
assertNotNull(result);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAllocateDuplicateVolumeVOEncryptionFromOldVolumeOnly() {
|
||||
Volume oldVol = Mockito.mock(Volume.class);
|
||||
Mockito.when(oldVol.getVolumeType()).thenReturn(Volume.Type.ROOT);
|
||||
Mockito.when(oldVol.getName()).thenReturn("vol-old");
|
||||
Mockito.when(oldVol.getDataCenterId()).thenReturn(1L);
|
||||
Mockito.when(oldVol.getDomainId()).thenReturn(2L);
|
||||
Mockito.when(oldVol.getAccountId()).thenReturn(3L);
|
||||
Mockito.when(oldVol.getDiskOfferingId()).thenReturn(4L);
|
||||
Mockito.when(oldVol.getProvisioningType()).thenReturn(Storage.ProvisioningType.SPARSE);
|
||||
Mockito.when(oldVol.getSize()).thenReturn(30L);
|
||||
Mockito.when(oldVol.getMinIops()).thenReturn(10L);
|
||||
Mockito.when(oldVol.getMaxIops()).thenReturn(500L);
|
||||
Mockito.when(oldVol.get_iScsiName()).thenReturn("iqn.old");
|
||||
Mockito.when(oldVol.getTemplateId()).thenReturn(123L);
|
||||
Mockito.when(oldVol.getDeviceId()).thenReturn(1L);
|
||||
Mockito.when(oldVol.getInstanceId()).thenReturn(100L);
|
||||
Mockito.when(oldVol.isRecreatable()).thenReturn(false);
|
||||
Mockito.when(oldVol.getFormat()).thenReturn(Storage.ImageFormat.RAW);
|
||||
|
||||
DiskOffering diskOffering = Mockito.mock(DiskOffering.class);
|
||||
Mockito.when(diskOffering.getEncrypt()).thenReturn(false); // explicitly disables encryption
|
||||
|
||||
VolumeVO persistedVol = Mockito.mock(VolumeVO.class);
|
||||
Mockito.when(volumeDao.persist(Mockito.any())).thenReturn(persistedVol);
|
||||
|
||||
VolumeVO result = volumeOrchestrator.allocateDuplicateVolumeVO(oldVol, diskOffering, null);
|
||||
assertNotNull(result);
|
||||
Mockito.verify(volumeDao).persist(Mockito.any());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testVolumeOnSharedStoragePoolTrue() {
|
||||
VolumeVO volume = Mockito.mock(VolumeVO.class);
|
||||
Mockito.when(volume.getPoolId()).thenReturn(MOCK_POOL_ID);
|
||||
|
||||
StoragePoolVO pool = Mockito.mock(StoragePoolVO.class);
|
||||
Mockito.when(pool.getScope()).thenReturn(ScopeType.CLUSTER); // Shared scope
|
||||
Mockito.when(storagePoolDao.findById(MOCK_POOL_ID)).thenReturn(pool);
|
||||
|
||||
assertTrue(volumeOrchestrator.volumeOnSharedStoragePool(volume));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testVolumeOnSharedStoragePoolFalseHostScope() {
|
||||
VolumeVO volume = Mockito.mock(VolumeVO.class);
|
||||
Mockito.when(volume.getPoolId()).thenReturn(MOCK_POOL_ID);
|
||||
|
||||
StoragePoolVO pool = Mockito.mock(StoragePoolVO.class);
|
||||
Mockito.when(pool.getScope()).thenReturn(ScopeType.HOST); // Local scope
|
||||
Mockito.when(storagePoolDao.findById(MOCK_POOL_ID)).thenReturn(pool);
|
||||
|
||||
Assert.assertFalse(volumeOrchestrator.volumeOnSharedStoragePool(volume));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testVolumeOnSharedStoragePoolFalseNoPool() {
|
||||
VolumeVO volume = Mockito.mock(VolumeVO.class);
|
||||
Mockito.when(volume.getPoolId()).thenReturn(null); // No pool associated
|
||||
|
||||
Assert.assertFalse(volumeOrchestrator.volumeOnSharedStoragePool(volume));
|
||||
Mockito.verify(storagePoolDao, Mockito.never()).findById(Mockito.anyLong());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testVolumeOnSharedStoragePoolFalsePoolNotFound() {
|
||||
VolumeVO volume = Mockito.mock(VolumeVO.class);
|
||||
Mockito.when(volume.getPoolId()).thenReturn(MOCK_POOL_ID);
|
||||
|
||||
Mockito.when(storagePoolDao.findById(MOCK_POOL_ID)).thenReturn(null); // Pool not found in DB
|
||||
|
||||
Assert.assertFalse(volumeOrchestrator.volumeOnSharedStoragePool(volume));
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void testVolumeInactiveNoVmId() {
|
||||
VolumeVO volume = Mockito.mock(VolumeVO.class);
|
||||
Mockito.when(volume.getInstanceId()).thenReturn(null);
|
||||
assertTrue(volumeOrchestrator.volumeInactive(volume));
|
||||
Mockito.verify(entityMgr, Mockito.never()).findById(Mockito.eq(UserVm.class), Mockito.anyLong());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testVolumeInactiveVmNotFound() {
|
||||
VolumeVO volume = Mockito.mock(VolumeVO.class);
|
||||
Mockito.when(volume.getInstanceId()).thenReturn(MOCK_VM_ID);
|
||||
Mockito.when(entityMgr.findById(UserVm.class, MOCK_VM_ID)).thenReturn(null);
|
||||
assertTrue(volumeOrchestrator.volumeInactive(volume));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testVolumeInactiveVmStopped() {
|
||||
VolumeVO volume = Mockito.mock(VolumeVO.class);
|
||||
Mockito.when(volume.getInstanceId()).thenReturn(MOCK_VM_ID);
|
||||
UserVm vm = Mockito.mock(UserVm.class);
|
||||
Mockito.when(vm.getState()).thenReturn(VirtualMachine.State.Stopped);
|
||||
Mockito.when(entityMgr.findById(UserVm.class, MOCK_VM_ID)).thenReturn(vm);
|
||||
assertTrue(volumeOrchestrator.volumeInactive(volume));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testVolumeInactiveVmDestroyed() {
|
||||
VolumeVO volume = Mockito.mock(VolumeVO.class);
|
||||
Mockito.when(volume.getInstanceId()).thenReturn(MOCK_VM_ID);
|
||||
UserVm vm = Mockito.mock(UserVm.class);
|
||||
Mockito.when(vm.getState()).thenReturn(VirtualMachine.State.Destroyed);
|
||||
Mockito.when(entityMgr.findById(UserVm.class, MOCK_VM_ID)).thenReturn(vm);
|
||||
assertTrue(volumeOrchestrator.volumeInactive(volume));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testVolumeInactiveVmRunning() {
|
||||
VolumeVO volume = Mockito.mock(VolumeVO.class);
|
||||
Mockito.when(volume.getInstanceId()).thenReturn(MOCK_VM_ID);
|
||||
UserVm vm = Mockito.mock(UserVm.class);
|
||||
Mockito.when(vm.getState()).thenReturn(VirtualMachine.State.Running); // Active state
|
||||
Mockito.when(entityMgr.findById(UserVm.class, MOCK_VM_ID)).thenReturn(vm);
|
||||
Assert.assertFalse(volumeOrchestrator.volumeInactive(volume));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetVmNameOnVolumeNoVmId() {
|
||||
VolumeVO volume = Mockito.mock(VolumeVO.class);
|
||||
Mockito.when(volume.getInstanceId()).thenReturn(null);
|
||||
Assert.assertNull(volumeOrchestrator.getVmNameOnVolume(volume));
|
||||
Mockito.verify(entityMgr, Mockito.never()).findById(Mockito.eq(VirtualMachine.class), Mockito.anyLong());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetVmNameOnVolumeVmNotFound() {
|
||||
VolumeVO volume = Mockito.mock(VolumeVO.class);
|
||||
Mockito.when(volume.getInstanceId()).thenReturn(MOCK_VM_ID);
|
||||
Mockito.when(entityMgr.findById(VirtualMachine.class, MOCK_VM_ID)).thenReturn(null);
|
||||
Assert.assertNull(volumeOrchestrator.getVmNameOnVolume(volume));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetVmNameOnVolumeSuccess() {
|
||||
VolumeVO volume = Mockito.mock(VolumeVO.class);
|
||||
Mockito.when(volume.getInstanceId()).thenReturn(MOCK_VM_ID);
|
||||
VirtualMachine vm = Mockito.mock(VirtualMachine.class);
|
||||
Mockito.when(vm.getInstanceName()).thenReturn(MOCK_VM_NAME);
|
||||
Mockito.when(entityMgr.findById(VirtualMachine.class, MOCK_VM_ID)).thenReturn(vm);
|
||||
Assert.assertEquals(MOCK_VM_NAME, volumeOrchestrator.getVmNameOnVolume(volume));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testValidateVolumeSizeRangeValid() throws Exception {
|
||||
overrideDefaultConfigValue(VolumeOrchestrator.MaxVolumeSize, "2000");
|
||||
assertTrue(volumeOrchestrator.validateVolumeSizeRange(1024 * 1024 * 1024)); // 1 GiB
|
||||
assertTrue(volumeOrchestrator.validateVolumeSizeRange(2000L * 1024 * 1024 * 1024)); // 2000 GiB (long literal avoids int overflow)
|
||||
}
|
||||
|
||||
@Test(expected = InvalidParameterValueException.class)
|
||||
public void testValidateVolumeSizeRangeTooSmall() {
|
||||
volumeOrchestrator.validateVolumeSizeRange(1024L); // Less than 1GiB
|
||||
}
|
||||
|
||||
@Test(expected = InvalidParameterValueException.class)
|
||||
public void testValidateVolumeSizeRangeNegative() {
|
||||
volumeOrchestrator.validateVolumeSizeRange(-10); // Negative size
|
||||
}
|
||||
|
||||
@Test(expected = InvalidParameterValueException.class)
|
||||
public void testValidateVolumeSizeRangeTooLarge() throws Exception {
|
||||
overrideDefaultConfigValue(VolumeOrchestrator.MaxVolumeSize, "100L");
|
||||
volumeOrchestrator.validateVolumeSizeRange(101);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCanVmRestartOnAnotherServerAllShared() {
|
||||
VolumeVO vol1 = Mockito.mock(VolumeVO.class);
|
||||
VolumeVO vol2 = Mockito.mock(VolumeVO.class);
|
||||
Mockito.when(vol1.getPoolId()).thenReturn(10L);
|
||||
Mockito.when(vol2.getPoolId()).thenReturn(20L);
|
||||
Mockito.when(vol1.isRecreatable()).thenReturn(false);
|
||||
Mockito.when(vol2.isRecreatable()).thenReturn(false);
|
||||
|
||||
|
||||
StoragePoolVO pool1 = Mockito.mock(StoragePoolVO.class);
|
||||
StoragePoolVO pool2 = Mockito.mock(StoragePoolVO.class);
|
||||
Mockito.when(pool1.getPoolType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem); // Shared
|
||||
Mockito.when(pool2.getPoolType()).thenReturn(Storage.StoragePoolType.RBD); // Shared
|
||||
|
||||
Mockito.when(volumeDao.findCreatedByInstance(MOCK_VM_ID)).thenReturn(List.of(vol1, vol2));
|
||||
Mockito.when(storagePoolDao.findById(10L)).thenReturn(pool1);
|
||||
Mockito.when(storagePoolDao.findById(20L)).thenReturn(pool2);
|
||||
|
||||
|
||||
assertTrue(volumeOrchestrator.canVmRestartOnAnotherServer(MOCK_VM_ID));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCanVmRestartOnAnotherServerOneLocalNotRecreatable() {
|
||||
VolumeVO vol1 = Mockito.mock(VolumeVO.class);
|
||||
VolumeVO vol2 = Mockito.mock(VolumeVO.class); // Local, not recreatable
|
||||
Mockito.when(vol1.getPoolId()).thenReturn(10L);
|
||||
Mockito.when(vol2.getPoolId()).thenReturn(30L);
|
||||
Mockito.when(vol1.isRecreatable()).thenReturn(false);
|
||||
Mockito.when(vol2.isRecreatable()).thenReturn(false); // Not recreatable
|
||||
|
||||
StoragePoolVO pool1 = Mockito.mock(StoragePoolVO.class);
|
||||
StoragePoolVO pool2 = Mockito.mock(StoragePoolVO.class);
|
||||
Mockito.when(pool1.getPoolType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem); // Shared
|
||||
Mockito.when(pool2.getPoolType()).thenReturn(Storage.StoragePoolType.LVM); // Local
|
||||
|
||||
Mockito.when(volumeDao.findCreatedByInstance(MOCK_VM_ID)).thenReturn(List.of(vol1, vol2));
|
||||
Mockito.when(storagePoolDao.findById(10L)).thenReturn(pool1);
|
||||
Mockito.when(storagePoolDao.findById(30L)).thenReturn(pool2);
|
||||
|
||||
Assert.assertFalse("VM restart should be false if a non-recreatable local disk exists",
|
||||
volumeOrchestrator.canVmRestartOnAnotherServer(MOCK_VM_ID));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCanVmRestartOnAnotherServerOneLocalRecreatable() {
|
||||
VolumeVO vol1 = Mockito.mock(VolumeVO.class);
|
||||
VolumeVO vol2 = Mockito.mock(VolumeVO.class); // Local, but recreatable
|
||||
Mockito.when(vol1.getPoolId()).thenReturn(10L);
|
||||
Mockito.when(vol2.getPoolId()).thenReturn(30L);
|
||||
Mockito.when(vol1.isRecreatable()).thenReturn(false);
|
||||
Mockito.when(vol2.isRecreatable()).thenReturn(true); // Recreatable
|
||||
|
||||
StoragePoolVO pool1 = Mockito.mock(StoragePoolVO.class);
|
||||
StoragePoolVO pool2 = Mockito.mock(StoragePoolVO.class);
|
||||
Mockito.when(pool1.getPoolType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem); // Shared
|
||||
|
||||
Mockito.when(volumeDao.findCreatedByInstance(MOCK_VM_ID)).thenReturn(List.of(vol1, vol2));
|
||||
Mockito.when(storagePoolDao.findById(10L)).thenReturn(pool1);
|
||||
Mockito.when(storagePoolDao.findById(30L)).thenReturn(pool2);
|
||||
|
||||
assertTrue("VM restart should be true if local disk is recreatable",
|
||||
volumeOrchestrator.canVmRestartOnAnotherServer(MOCK_VM_ID));
|
||||
}
|
||||
|
||||
private void overrideDefaultConfigValue(final ConfigKey configKey, final String value) throws IllegalAccessException, NoSuchFieldException {
|
||||
final Field f = ConfigKey.class.getDeclaredField("_defaultValue");
|
||||
f.setAccessible(true);
|
||||
f.set(configKey, value);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testStart() throws Exception {
|
||||
Mockito.when(configDepot.isNewConfig(VolumeAllocationAlgorithm)).thenReturn(true);
|
||||
overrideDefaultConfigValue(DeploymentClusterPlanner.VmAllocationAlgorithm, "firstfit");
|
||||
Mockito.when(configurationDao.update(Mockito.anyString(), Mockito.anyString())).thenReturn(true);
|
||||
volumeOrchestrator.start();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testConfigKeys() {
|
||||
assertTrue(volumeOrchestrator.getConfigKeys().length > 0);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getVolumeCheckpointPathsAndImageStoreUrlsTestReturnEmptyListsIfNotKVM() {
|
||||
Pair<List<String>, Set<String>> result = volumeOrchestrator.getVolumeCheckpointPathsAndImageStoreUrls(0, Hypervisor.HypervisorType.VMware);
|
||||
|
||||
Assert.assertTrue(result.first().isEmpty());
|
||||
Assert.assertTrue(result.second().isEmpty());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getVolumeCheckpointPathsAndImageStoreUrlsTestReturnCheckpointIfKVM() {
|
||||
SnapshotDataStoreVO snapshotDataStoreVO = new SnapshotDataStoreVO();
|
||||
snapshotDataStoreVO.setKvmCheckpointPath("Test");
|
||||
snapshotDataStoreVO.setRole(DataStoreRole.Primary);
|
||||
|
||||
Mockito.doReturn(List.of(snapshotDataStoreVO)).when(snapshotDataStoreDaoMock).listReadyByVolumeIdAndCheckpointPathNotNull(Mockito.anyLong());
|
||||
|
||||
Pair<List<String>, Set<String>> result = volumeOrchestrator.getVolumeCheckpointPathsAndImageStoreUrls(0, Hypervisor.HypervisorType.KVM);
|
||||
|
||||
Assert.assertEquals("Test", result.first().get(0));
|
||||
Assert.assertTrue(result.second().isEmpty());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getVolumeCheckpointPathsAndImageStoreUrlsTestReturnCheckpointIfKVMAndImageStore() {
|
||||
SnapshotDataStoreVO snapshotDataStoreVO = new SnapshotDataStoreVO();
|
||||
snapshotDataStoreVO.setKvmCheckpointPath("Test");
|
||||
snapshotDataStoreVO.setRole(DataStoreRole.Image);
|
||||
snapshotDataStoreVO.setDataStoreId(13);
|
||||
|
||||
Mockito.doReturn(List.of(snapshotDataStoreVO)).when(snapshotDataStoreDaoMock).listReadyByVolumeIdAndCheckpointPathNotNull(Mockito.anyLong());
|
||||
|
||||
ImageStoreVO imageStoreVO = new ImageStoreVO();
|
||||
imageStoreVO.setUrl("URL");
|
||||
Mockito.doReturn(imageStoreVO).when(imageStoreDaoMock).findById(Mockito.anyLong());
|
||||
|
||||
Pair<List<String>, Set<String>> result = volumeOrchestrator.getVolumeCheckpointPathsAndImageStoreUrls(0, Hypervisor.HypervisorType.KVM);
|
||||
|
||||
Assert.assertEquals("Test", result.first().get(0));
|
||||
Assert.assertTrue(result.second().contains("URL"));
|
||||
Assert.assertEquals(1, result.second().size());
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -64,5 +64,5 @@ public interface CapacityDao extends GenericDao<CapacityVO, Long> {
|
|||
|
||||
float findClusterConsumption(Long clusterId, short capacityType, long computeRequested);
|
||||
|
||||
List<Long> orderHostsByFreeCapacity(Long zoneId, Long clusterId, short capacityType);
|
||||
Pair<List<Long>, Map<Long, Double>> orderHostsByFreeCapacity(Long zoneId, Long clusterId, short capacityType);
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -1028,10 +1028,11 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
|
|||
}
|
||||
|
||||
@Override
|
||||
public List<Long> orderHostsByFreeCapacity(Long zoneId, Long clusterId, short capacityTypeForOrdering){
|
||||
public Pair<List<Long>, Map<Long, Double>> orderHostsByFreeCapacity(Long zoneId, Long clusterId, short capacityTypeForOrdering){
|
||||
TransactionLegacy txn = TransactionLegacy.currentTxn();
|
||||
PreparedStatement pstmt = null;
|
||||
List<Long> result = new ArrayList<Long>();
|
||||
List<Long> result = new ArrayList<>();
|
||||
Map<Long, Double> hostCapacityMap = new HashMap<>();
|
||||
StringBuilder sql = new StringBuilder(ORDER_HOSTS_BY_FREE_CAPACITY_PART1);
|
||||
if (zoneId != null) {
|
||||
sql.append(" AND data_center_id = ?");
|
||||
|
|
@@ -1054,9 +1055,11 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
|
|||
|
||||
ResultSet rs = pstmt.executeQuery();
|
||||
while (rs.next()) {
|
||||
result.add(rs.getLong(1));
|
||||
Long hostId = rs.getLong(1);
|
||||
result.add(hostId);
|
||||
hostCapacityMap.put(hostId, rs.getDouble(2));
|
||||
}
|
||||
return result;
|
||||
return new Pair<>(result, hostCapacityMap);
|
||||
} catch (SQLException e) {
|
||||
throw new CloudRuntimeException("DB Exception on: " + sql, e);
|
||||
} catch (Throwable e) {
|
||||
|
|
|
|||
|
|
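A minimal usage sketch (illustrative only, not part of this commit) of the new orderHostsByFreeCapacity return type: the first element of the Pair keeps the ordered host id list the method returned before, and the second element adds each host's free capacity so a caller can weight hosts without a second query. The enclosing method name, the injected capacityDao field and the logger are assumptions made for this sketch.

    // Hypothetical caller; capacityDao and logger are assumed to be available in the enclosing class.
    private void logHostsOrderedByFreeCapacity(Long zoneId, Long clusterId, short capacityType) {
        Pair<List<Long>, Map<Long, Double>> ordered = capacityDao.orderHostsByFreeCapacity(zoneId, clusterId, capacityType);
        List<Long> hostIdsByFreeCapacity = ordered.first();        // same ordering as the previous List<Long> result
        Map<Long, Double> freeCapacityByHostId = ordered.second(); // new: per-host free capacity values
        for (Long hostId : hostIdsByFreeCapacity) {
            logger.debug(String.format("Host [%s] has free capacity [%s].", hostId, freeCapacityByHostId.get(hostId)));
        }
    }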
@@ -31,6 +31,7 @@ import com.cloud.resource.ResourceState;
|
|||
import com.cloud.utils.Pair;
|
||||
import com.cloud.utils.db.GenericDao;
|
||||
import com.cloud.utils.fsm.StateDao;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
|
||||
|
||||
/**
|
||||
* Data Access Object for server
|
||||
|
|
@@ -218,4 +219,6 @@ public interface HostDao extends GenericDao<HostVO, Long>, StateDao<Status, Stat
|
|||
List<CPU.CPUArch> listDistinctArchTypes(final Long clusterId);
|
||||
|
||||
List<HostVO> listByIds(final List<Long> ids);
|
||||
|
||||
Long findClusterIdByVolumeInfo(VolumeInfo volumeInfo);
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -35,6 +35,8 @@ import javax.annotation.PostConstruct;
|
|||
import javax.inject.Inject;
|
||||
import javax.persistence.TableGenerator;
|
||||
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
|
||||
import org.apache.cloudstack.utils.jsinterpreter.TagAsRuleHelper;
|
||||
import org.apache.commons.collections.CollectionUtils;
|
||||
|
||||
|
|
@@ -77,6 +79,7 @@ import com.cloud.utils.db.SearchCriteria.Op;
|
|||
import com.cloud.utils.db.TransactionLegacy;
|
||||
import com.cloud.utils.db.UpdateBuilder;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import org.apache.commons.lang3.ObjectUtils;
|
||||
|
||||
@DB
|
||||
@TableGenerator(name = "host_req_sq", table = "op_host", pkColumnName = "id", valueColumnName = "sequence", allocationSize = 1)
|
||||
|
|
@@ -1853,4 +1856,24 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
|
|||
sc.setParameters("id", ids.toArray());
|
||||
return search(sc, null);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public Long findClusterIdByVolumeInfo(VolumeInfo volumeInfo) {
|
||||
VirtualMachine virtualMachine = volumeInfo.getAttachedVM();
|
||||
if (virtualMachine == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
Long hostId = ObjectUtils.defaultIfNull(virtualMachine.getHostId(), virtualMachine.getLastHostId());
|
||||
Host host = findById(hostId);
|
||||
|
||||
if (host == null) {
|
||||
logger.warn(String.format("VM [%s] has no host recorded in the DB; either this VM was never started, or there is some inconsistency in the DB.", virtualMachine.getUuid()));
|
||||
return null;
|
||||
}
|
||||
|
||||
return host.getClusterId();
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -284,6 +284,6 @@ public class SnapshotVO implements Snapshot {
|
|||
public String toString() {
|
||||
return String.format("Snapshot %s",
|
||||
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
|
||||
this, "id", "uuid", "name", "volumeId", "version"));
|
||||
this, "id", "uuid", "name", "volumeId", "version", "state"));
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -30,6 +30,8 @@ public interface StoragePoolHostDao extends GenericDao<StoragePoolHostVO, Long>
|
|||
|
||||
public StoragePoolHostVO findByPoolHost(long poolId, long hostId);
|
||||
|
||||
List<StoragePoolHostVO> findByLocalPath(String path);
|
||||
|
||||
List<StoragePoolHostVO> listByHostStatus(long poolId, Status hostStatus);
|
||||
|
||||
List<Long> findHostsConnectedToPools(List<Long> poolIds);
|
||||
|
|
|
|||
|
|
@@ -45,6 +45,7 @@ public class StoragePoolHostDaoImpl extends GenericDaoBase<StoragePoolHostVO, Lo
|
|||
protected final SearchBuilder<StoragePoolHostVO> PoolSearch;
|
||||
protected final SearchBuilder<StoragePoolHostVO> HostSearch;
|
||||
protected final SearchBuilder<StoragePoolHostVO> PoolHostSearch;
|
||||
protected final SearchBuilder<StoragePoolHostVO> LocalPathSearch;
|
||||
|
||||
protected SearchBuilder<StoragePoolHostVO> poolNotInClusterSearch;
|
||||
|
||||
|
|
@@ -77,6 +78,9 @@ public class StoragePoolHostDaoImpl extends GenericDaoBase<StoragePoolHostVO, Lo
|
|||
PoolHostSearch.and("host_id", PoolHostSearch.entity().getHostId(), SearchCriteria.Op.EQ);
|
||||
PoolHostSearch.done();
|
||||
|
||||
LocalPathSearch = createSearchBuilder();
|
||||
LocalPathSearch.and("local_path", LocalPathSearch.entity().getLocalPath(), SearchCriteria.Op.EQ);
|
||||
LocalPathSearch.done();
|
||||
}
|
||||
|
||||
@PostConstruct
|
||||
|
|
@@ -117,6 +121,13 @@ public class StoragePoolHostDaoImpl extends GenericDaoBase<StoragePoolHostVO, Lo
|
|||
return findOneIncludingRemovedBy(sc);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<StoragePoolHostVO> findByLocalPath(String path) {
|
||||
SearchCriteria<StoragePoolHostVO> sc = LocalPathSearch.create();
|
||||
sc.setParameters("local_path", path);
|
||||
return listBy(sc);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<StoragePoolHostVO> listByHostStatus(long poolId, Status hostStatus) {
|
||||
TransactionLegacy txn = TransactionLegacy.currentTxn();
|
||||
|
|
|
|||
|
|
@@ -28,4 +28,6 @@ public interface UsageNetworksDao extends GenericDao<UsageNetworksVO, Long> {
|
|||
void remove(long networkId, Date removed);
|
||||
|
||||
List<UsageNetworksVO> getUsageRecords(Long accountId, Date startDate, Date endDate);
|
||||
|
||||
List<UsageNetworksVO> listAll(long networkId);
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -19,6 +19,7 @@ package com.cloud.usage.dao;
|
|||
import com.cloud.usage.UsageNetworksVO;
|
||||
import com.cloud.utils.DateUtil;
|
||||
import com.cloud.utils.db.GenericDaoBase;
|
||||
import com.cloud.utils.db.SearchBuilder;
|
||||
import com.cloud.utils.db.SearchCriteria;
|
||||
import com.cloud.utils.db.TransactionLegacy;
|
||||
|
||||
|
|
@@ -26,6 +27,7 @@ import org.springframework.stereotype.Component;
|
|||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
|
||||
import javax.annotation.PostConstruct;
|
||||
import java.sql.PreparedStatement;
|
||||
import java.sql.ResultSet;
|
||||
import java.util.ArrayList;
|
||||
|
|
@@ -40,6 +42,14 @@ public class UsageNetworksDaoImpl extends GenericDaoBase<UsageNetworksVO, Long>
|
|||
" account_id = ? AND ((removed IS NULL AND created <= ?) OR (created BETWEEN ? AND ?) OR (removed BETWEEN ? AND ?) " +
|
||||
" OR ((created <= ?) AND (removed >= ?)))";
|
||||
|
||||
private SearchBuilder<UsageNetworksVO> usageNetworksSearch;
|
||||
|
||||
@PostConstruct
|
||||
public void init() {
|
||||
usageNetworksSearch = createSearchBuilder();
|
||||
usageNetworksSearch.and("networkId", usageNetworksSearch.entity().getNetworkId(), SearchCriteria.Op.EQ);
|
||||
usageNetworksSearch.done();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void update(long networkId, long newNetworkOffering, String state) {
|
||||
|
|
@@ -131,4 +141,11 @@ public class UsageNetworksDaoImpl extends GenericDaoBase<UsageNetworksVO, Long>
|
|||
|
||||
return usageRecords;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<UsageNetworksVO> listAll(long networkId) {
|
||||
SearchCriteria<UsageNetworksVO> sc = usageNetworksSearch.create();
|
||||
sc.setParameters("networkId", networkId);
|
||||
return listBy(sc);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -24,6 +24,10 @@ import java.util.List;
|
|||
|
||||
public interface UsageVpcDao extends GenericDao<UsageVpcVO, Long> {
|
||||
void update(UsageVpcVO usage);
|
||||
|
||||
void remove(long vpcId, Date removed);
|
||||
|
||||
List<UsageVpcVO> getUsageRecords(Long accountId, Date startDate, Date endDate);
|
||||
|
||||
List<UsageVpcVO> listAll(long vpcId);
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -19,10 +19,12 @@ package com.cloud.usage.dao;
|
|||
import com.cloud.usage.UsageVpcVO;
|
||||
import com.cloud.utils.DateUtil;
|
||||
import com.cloud.utils.db.GenericDaoBase;
|
||||
import com.cloud.utils.db.SearchBuilder;
|
||||
import com.cloud.utils.db.SearchCriteria;
|
||||
import com.cloud.utils.db.TransactionLegacy;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import javax.annotation.PostConstruct;
|
||||
import java.sql.PreparedStatement;
|
||||
import java.sql.ResultSet;
|
||||
import java.util.ArrayList;
|
||||
|
|
@@ -36,6 +38,15 @@ public class UsageVpcDaoImpl extends GenericDaoBase<UsageVpcVO, Long> implements
|
|||
" account_id = ? AND ((removed IS NULL AND created <= ?) OR (created BETWEEN ? AND ?) OR (removed BETWEEN ? AND ?) " +
|
||||
" OR ((created <= ?) AND (removed >= ?)))";
|
||||
|
||||
private SearchBuilder<UsageVpcVO> usageVpcSearch;
|
||||
|
||||
@PostConstruct
|
||||
public void init() {
|
||||
usageVpcSearch = createSearchBuilder();
|
||||
usageVpcSearch.and("vpcId", usageVpcSearch.entity().getVpcId(), SearchCriteria.Op.EQ);
|
||||
usageVpcSearch.done();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void update(UsageVpcVO usage) {
|
||||
TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
|
||||
|
|
@@ -124,4 +135,11 @@ public class UsageVpcDaoImpl extends GenericDaoBase<UsageVpcVO, Long> implements
|
|||
|
||||
return usageRecords;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<UsageVpcVO> listAll(long vpcId) {
|
||||
SearchCriteria<UsageVpcVO> sc = usageVpcSearch.create();
|
||||
sc.setParameters("vpcId", vpcId);
|
||||
return listBy(sc);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -60,6 +60,8 @@ public interface PrimaryDataStoreDao extends GenericDao<StoragePoolVO, Long> {
|
|||
|
||||
StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details, List<String> tags, Boolean isTagARule);
|
||||
|
||||
StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details, List<String> tags, Boolean isTagARule, boolean displayDetails);
|
||||
|
||||
/**
|
||||
* Find pool by name.
|
||||
*
|
||||
|
|
@@ -103,6 +105,8 @@ public interface PrimaryDataStoreDao extends GenericDao<StoragePoolVO, Long> {
|
|||
|
||||
void updateDetails(long poolId, Map<String, String> details);
|
||||
|
||||
void removeDetails(long poolId);
|
||||
|
||||
Map<String, String> getDetails(long poolId);
|
||||
|
||||
List<String> searchForStoragePoolTags(long poolId);
|
||||
|
|
|
|||
|
|
@@ -296,14 +296,19 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
|
|||
}
|
||||
|
||||
@Override
|
||||
@DB
|
||||
public StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details, List<String> tags, Boolean isTagARule) {
|
||||
return persist(pool, details, tags, isTagARule, true);
|
||||
}
|
||||
|
||||
@Override
|
||||
@DB
|
||||
public StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details, List<String> tags, Boolean isTagARule, boolean displayDetails) {
|
||||
TransactionLegacy txn = TransactionLegacy.currentTxn();
|
||||
txn.start();
|
||||
pool = super.persist(pool);
|
||||
if (details != null) {
|
||||
for (Map.Entry<String, String> detail : details.entrySet()) {
|
||||
StoragePoolDetailVO vo = new StoragePoolDetailVO(pool.getId(), detail.getKey(), detail.getValue(), true);
|
||||
StoragePoolDetailVO vo = new StoragePoolDetailVO(pool.getId(), detail.getKey(), detail.getValue(), displayDetails);
|
||||
_detailsDao.persist(vo);
|
||||
}
|
||||
}
|
||||
|
|
@@ -570,6 +575,11 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
|
|||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void removeDetails(long poolId) {
|
||||
_detailsDao.removeDetails(poolId);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<String, String> getDetails(long poolId) {
|
||||
return _detailsDao.listDetailsKeyPairs(poolId);
|
||||
|
|
|
|||
|
|
@@ -19,6 +19,7 @@ package org.apache.cloudstack.storage.datastore.db;
|
|||
import java.util.Date;
|
||||
import java.util.List;
|
||||
|
||||
import com.cloud.hypervisor.Hypervisor;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
|
||||
|
||||
|
|
@@ -46,12 +47,26 @@ StateDao<ObjectInDataStoreStateMachine.State, ObjectInDataStoreStateMachine.Even
|
|||
|
||||
SnapshotDataStoreVO findParent(DataStoreRole role, Long storeId, Long volumeId);
|
||||
|
||||
List<SnapshotDataStoreVO> listBySnapshot(long snapshotId, DataStoreRole role);
|
||||
SnapshotDataStoreVO findParent(DataStoreRole role, Long storeId, Long zoneId, Long volumeId, boolean kvmIncrementalSnapshot, Hypervisor.HypervisorType hypervisorType);
|
||||
|
||||
SnapshotDataStoreVO findBySnapshotIdAndDataStoreRoleAndState(long snapshotId, DataStoreRole role, ObjectInDataStoreStateMachine.State state);
|
||||
|
||||
List<SnapshotDataStoreVO> listReadyByVolumeIdAndCheckpointPathNotNull(long volumeId);
|
||||
|
||||
SnapshotDataStoreVO findOneBySnapshotId(long snapshotId, long zoneId);
|
||||
|
||||
List<SnapshotDataStoreVO> listBySnapshotId(long snapshotId);
|
||||
|
||||
List<SnapshotDataStoreVO> listBySnapshotAndDataStoreRole(long snapshotId, DataStoreRole role);
|
||||
|
||||
List<SnapshotDataStoreVO> listExtractedSnapshotsBeforeDate(Date beforeDate);
|
||||
|
||||
List<SnapshotDataStoreVO> listReadyBySnapshot(long snapshotId, DataStoreRole role);
|
||||
|
||||
SnapshotDataStoreVO findBySourceSnapshot(long snapshotId, DataStoreRole role);
|
||||
|
||||
List<SnapshotDataStoreVO> findBySnapshotIdAndNotInDestroyedHiddenState(long snapshotId);
|
||||
|
||||
List<SnapshotDataStoreVO> listDestroyed(long storeId);
|
||||
|
||||
List<SnapshotDataStoreVO> findBySnapshotId(long snapshotId);
|
||||
|
|
|
|||
|
|
@@ -58,26 +58,42 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase<SnapshotDataStoreVO
|
|||
private static final String SNAPSHOT_ID = "snapshot_id";
|
||||
private static final String VOLUME_ID = "volume_id";
|
||||
private static final String CREATED = "created";
|
||||
private static final String KVM_CHECKPOINT_PATH = "kvm_checkpoint_path";
|
||||
private static final String URL_CREATED_BEFORE = "url_created_before";
|
||||
public static final String DOWNLOAD_URL = "downloadUrl";
|
||||
public static final String DATA_CENTER_ID = "data_center_id";
|
||||
|
||||
private SearchBuilder<SnapshotDataStoreVO> searchFilteringStoreIdEqStoreRoleEqStateNeqRefCntNeq;
|
||||
protected SearchBuilder<SnapshotDataStoreVO> searchFilteringStoreIdEqStateEqStoreRoleEqIdEqUpdateCountEqSnapshotIdEqVolumeIdEq;
|
||||
private SearchBuilder<SnapshotDataStoreVO> stateSearch;
|
||||
private SearchBuilder<SnapshotDataStoreVO> idStateNeqSearch;
|
||||
private SearchBuilder<SnapshotDataStoreVO> idStateNinSearch;
|
||||
protected SearchBuilder<SnapshotVO> snapshotVOSearch;
|
||||
private SearchBuilder<SnapshotDataStoreVO> snapshotCreatedSearch;
|
||||
private SearchBuilder<SnapshotDataStoreVO> dataStoreAndInstallPathSearch;
|
||||
private SearchBuilder<SnapshotDataStoreVO> storeAndSnapshotIdsSearch;
|
||||
private SearchBuilder<SnapshotDataStoreVO> storeSnapshotDownloadStatusSearch;
|
||||
private SearchBuilder<SnapshotDataStoreVO> searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEqKVMCheckpointNotNull;
|
||||
private SearchBuilder<SnapshotDataStoreVO> searchFilterStateAndDownloadUrlNotNullAndDownloadUrlCreatedBefore;
|
||||
private SearchBuilder<SnapshotDataStoreVO> searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEq;
|
||||
|
||||
|
||||
protected static final List<Hypervisor.HypervisorType> HYPERVISORS_SUPPORTING_SNAPSHOTS_CHAINING = List.of(Hypervisor.HypervisorType.XenServer);
|
||||
|
||||
@Inject
|
||||
protected SnapshotDao snapshotDao;
|
||||
|
||||
@Inject
|
||||
protected ImageStoreDao imageStoreDao;
|
||||
|
||||
private static final String FIND_OLDEST_OR_LATEST_SNAPSHOT = "select store_id, store_role, snapshot_id from cloud.snapshot_store_ref where " +
|
||||
" store_role = ? and volume_id = ? and state = 'Ready'" +
|
||||
" order by created %s " +
|
||||
" limit 1";
|
||||
|
||||
private static final String FIND_SNAPSHOT_IN_ZONE = "SELECT ssr.* FROM " +
|
||||
"snapshot_store_ref ssr, snapshots s " +
|
||||
"WHERE ssr.snapshot_id=? AND ssr.snapshot_id = s.id AND s.data_center_id=?;";
|
||||
|
||||
@Override
|
||||
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
|
||||
super.configure(name, params);
|
||||
|
|
@@ -119,10 +135,10 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase<SnapshotDataStoreVO
|
|||
stateSearch.done();
|
||||
|
||||
|
||||
idStateNeqSearch = createSearchBuilder();
|
||||
idStateNeqSearch.and(SNAPSHOT_ID, idStateNeqSearch.entity().getSnapshotId(), SearchCriteria.Op.EQ);
|
||||
idStateNeqSearch.and(STATE, idStateNeqSearch.entity().getState(), SearchCriteria.Op.NEQ);
|
||||
idStateNeqSearch.done();
|
||||
idStateNinSearch = createSearchBuilder();
|
||||
idStateNinSearch.and(SNAPSHOT_ID, idStateNinSearch.entity().getSnapshotId(), SearchCriteria.Op.EQ);
|
||||
idStateNinSearch.and(STATE, idStateNinSearch.entity().getState(), SearchCriteria.Op.NOTIN);
|
||||
idStateNinSearch.done();
|
||||
|
||||
snapshotVOSearch = snapshotDao.createSearchBuilder();
|
||||
snapshotVOSearch.and(VOLUME_ID, snapshotVOSearch.entity().getVolumeId(), SearchCriteria.Op.EQ);
|
||||
|
|
@@ -151,6 +167,26 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase<SnapshotDataStoreVO
|
|||
storeSnapshotDownloadStatusSearch.and("downloadState", storeSnapshotDownloadStatusSearch.entity().getDownloadState(), SearchCriteria.Op.IN);
|
||||
storeSnapshotDownloadStatusSearch.done();
|
||||
|
||||
searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEqKVMCheckpointNotNull = createSearchBuilder();
|
||||
searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEqKVMCheckpointNotNull.and(VOLUME_ID, searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEqKVMCheckpointNotNull.entity().getVolumeId(), SearchCriteria.Op.EQ);
|
||||
searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEqKVMCheckpointNotNull.and(STATE, searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEqKVMCheckpointNotNull.entity().getState(), SearchCriteria.Op.EQ);
|
||||
searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEqKVMCheckpointNotNull.and(STORE_ROLE, searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEqKVMCheckpointNotNull.entity().getRole(), SearchCriteria.Op.EQ);
|
||||
searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEqKVMCheckpointNotNull.and(KVM_CHECKPOINT_PATH, searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEqKVMCheckpointNotNull.entity().getKvmCheckpointPath(), SearchCriteria.Op.NNULL);
|
||||
searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEqKVMCheckpointNotNull.and(STORE_ID, searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEqKVMCheckpointNotNull.entity().getDataStoreId(), SearchCriteria.Op.IN);
|
||||
searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEqKVMCheckpointNotNull.done();
|
||||
|
||||
searchFilterStateAndDownloadUrlNotNullAndDownloadUrlCreatedBefore = createSearchBuilder();
|
||||
searchFilterStateAndDownloadUrlNotNullAndDownloadUrlCreatedBefore.and(STATE, searchFilterStateAndDownloadUrlNotNullAndDownloadUrlCreatedBefore.entity().getState(), SearchCriteria.Op.EQ);
|
||||
searchFilterStateAndDownloadUrlNotNullAndDownloadUrlCreatedBefore.and(DOWNLOAD_URL, searchFilterStateAndDownloadUrlNotNullAndDownloadUrlCreatedBefore.entity().getExtractUrl(), SearchCriteria.Op.NNULL);
|
||||
searchFilterStateAndDownloadUrlNotNullAndDownloadUrlCreatedBefore.and(URL_CREATED_BEFORE, searchFilterStateAndDownloadUrlNotNullAndDownloadUrlCreatedBefore.entity().getExtractUrlCreated(), SearchCriteria.Op.LT);
|
||||
searchFilterStateAndDownloadUrlNotNullAndDownloadUrlCreatedBefore.done();
|
||||
|
||||
searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEq = createSearchBuilder();
|
||||
searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEq.and(STATE, searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEq.entity().getState(), SearchCriteria.Op.EQ);
|
||||
searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEq.and(VOLUME_ID, searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEq.entity().getVolumeId(), SearchCriteria.Op.EQ);
|
||||
searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEq.and(STORE_ROLE, searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEq.entity().getRole(), SearchCriteria.Op.EQ);
|
||||
searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEq.and(STORE_ID, searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEq.entity().getDataStoreId(), SearchCriteria.Op.IN);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
|
@@ -283,26 +319,86 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase<SnapshotDataStoreVO
|
|||
@Override
|
||||
@DB
|
||||
public SnapshotDataStoreVO findParent(DataStoreRole role, Long storeId, Long volumeId) {
|
||||
if (!isSnapshotChainingRequired(volumeId)) {
|
||||
return findParent(role, storeId, null, volumeId, false, null);
|
||||
}
|
||||
|
||||
@Override
|
||||
@DB
|
||||
public SnapshotDataStoreVO findParent(DataStoreRole role, Long storeId, Long zoneId, Long volumeId, boolean kvmIncrementalSnapshot, Hypervisor.HypervisorType hypervisorType) {
|
||||
if (!isSnapshotChainingRequired(volumeId, kvmIncrementalSnapshot)) {
|
||||
logger.trace(String.format("Snapshot chaining is not required for snapshots of volume [%s]. Returning null as parent.", volumeId));
|
||||
return null;
|
||||
}
|
||||
|
||||
SearchCriteria<SnapshotDataStoreVO> sc = searchFilteringStoreIdEqStateEqStoreRoleEqIdEqUpdateCountEqSnapshotIdEqVolumeIdEq.create();
|
||||
SearchCriteria<SnapshotDataStoreVO> sc;
|
||||
if (kvmIncrementalSnapshot && Hypervisor.HypervisorType.KVM.equals(hypervisorType)) {
|
||||
sc = searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEqKVMCheckpointNotNull.create();
|
||||
} else {
|
||||
sc = searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEq.create();
|
||||
}
|
||||
|
||||
sc.setParameters(VOLUME_ID, volumeId);
|
||||
sc.setParameters(STORE_ROLE, role.toString());
|
||||
if (role != null) {
|
||||
sc.setParameters(STORE_ROLE, role.toString());
|
||||
}
|
||||
sc.setParameters(STATE, ObjectInDataStoreStateMachine.State.Ready.name());
|
||||
sc.setParameters(STORE_ID, storeId);
|
||||
if (storeId != null) {
|
||||
sc.setParameters(STORE_ID, new Long[]{storeId});
|
||||
} else if (zoneId != null) {
|
||||
List<ImageStoreVO> imageStores = imageStoreDao.listStoresByZoneId(zoneId);
|
||||
Object[] imageStoreIds = imageStores.stream().map(ImageStoreVO::getId).toArray();
|
||||
sc.setParameters(STORE_ID, imageStoreIds);
|
||||
}
|
||||
|
||||
List<SnapshotDataStoreVO> snapshotList = listBy(sc, new Filter(SnapshotDataStoreVO.class, CREATED, false, null, null));
|
||||
if (CollectionUtils.isNotEmpty(snapshotList)) {
|
||||
return snapshotList.get(0);
|
||||
if (CollectionUtils.isEmpty(snapshotList)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
SnapshotDataStoreVO parent = snapshotList.get(0);
|
||||
|
||||
if (kvmIncrementalSnapshot && parent.getKvmCheckpointPath() == null && Hypervisor.HypervisorType.KVM.equals(hypervisorType)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return parent;
|
||||
}
|
||||
|
||||
@Override
|
||||
public SnapshotDataStoreVO findBySnapshotIdAndDataStoreRoleAndState(long snapshotId, DataStoreRole role, State state) {
|
||||
SearchCriteria<SnapshotDataStoreVO> sc = createSearchCriteriaBySnapshotIdAndStoreRole(snapshotId, role);
|
||||
sc.setParameters(STATE, state);
|
||||
return findOneBy(sc);
|
||||
}
|
||||
|
||||
@Override
|
||||
public SnapshotDataStoreVO findOneBySnapshotId(long snapshotId, long zoneId) {
|
||||
try (TransactionLegacy transactionLegacy = TransactionLegacy.currentTxn()) {
|
||||
try (PreparedStatement preparedStatement = transactionLegacy.prepareStatement(FIND_SNAPSHOT_IN_ZONE)) {
|
||||
preparedStatement.setLong(1, snapshotId);
|
||||
preparedStatement.setLong(2, zoneId);
|
||||
|
||||
try (ResultSet resultSet = preparedStatement.executeQuery()) {
|
||||
if (resultSet.next()) {
|
||||
return toEntityBean(resultSet, false);
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (SQLException e) {
|
||||
logger.warn(String.format("Failed to find snapshot %s in zone %s due to [%s].", snapshotId, zoneId, e.getMessage()), e);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<SnapshotDataStoreVO> listBySnapshot(long snapshotId, DataStoreRole role) {
|
||||
public List<SnapshotDataStoreVO> listBySnapshotId(long snapshotId) {
|
||||
SearchCriteria<SnapshotDataStoreVO> sc = searchFilteringStoreIdEqStateEqStoreRoleEqIdEqUpdateCountEqSnapshotIdEqVolumeIdEq.create();
|
||||
sc.setParameters(SNAPSHOT_ID, snapshotId);
|
||||
return listBy(sc);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<SnapshotDataStoreVO> listBySnapshotAndDataStoreRole(long snapshotId, DataStoreRole role) {
|
||||
SearchCriteria<SnapshotDataStoreVO> sc = createSearchCriteriaBySnapshotIdAndStoreRole(snapshotId, role);
|
||||
return listBy(sc);
|
||||
}
|
||||
|
|
@@ -340,9 +436,17 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase<SnapshotDataStoreVO
|
|||
|
||||
@Override
|
||||
public List<SnapshotDataStoreVO> findBySnapshotId(long snapshotId) {
|
||||
SearchCriteria<SnapshotDataStoreVO> sc = idStateNeqSearch.create();
|
||||
SearchCriteria<SnapshotDataStoreVO> sc = idStateNinSearch.create();
|
||||
sc.setParameters(SNAPSHOT_ID, snapshotId);
|
||||
sc.setParameters(STATE, State.Destroyed);
|
||||
sc.setParameters(STATE, State.Destroyed.name());
|
||||
return listBy(sc);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<SnapshotDataStoreVO> findBySnapshotIdAndNotInDestroyedHiddenState(long snapshotId) {
|
||||
SearchCriteria<SnapshotDataStoreVO> sc = idStateNinSearch.create();
|
||||
sc.setParameters(SNAPSHOT_ID, snapshotId);
|
||||
sc.setParameters(STATE, State.Destroyed.name(), State.Hidden.name());
|
||||
return listBy(sc);
|
||||
}
|
||||
|
||||
|
|
@@ -485,13 +589,35 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase<SnapshotDataStoreVO
|
|||
return sc;
|
||||
}
|
||||
|
||||
protected boolean isSnapshotChainingRequired(long volumeId) {
|
||||
protected boolean isSnapshotChainingRequired(long volumeId, boolean kvmIncrementalSnapshot) {
|
||||
SearchCriteria<SnapshotVO> sc = snapshotVOSearch.create();
|
||||
sc.setParameters(VOLUME_ID, volumeId);
|
||||
|
||||
SnapshotVO snapshot = snapshotDao.findOneBy(sc);
|
||||
|
||||
return snapshot != null && HYPERVISORS_SUPPORTING_SNAPSHOTS_CHAINING.contains(snapshot.getHypervisorType());
|
||||
if (snapshot == null) {
|
||||
return false;
|
||||
}
|
||||
|
||||
Hypervisor.HypervisorType hypervisorType = snapshot.getHypervisorType();
|
||||
return HYPERVISORS_SUPPORTING_SNAPSHOTS_CHAINING.contains(hypervisorType) || (Hypervisor.HypervisorType.KVM.equals(hypervisorType) && kvmIncrementalSnapshot);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<SnapshotDataStoreVO> listReadyByVolumeIdAndCheckpointPathNotNull(long volumeId) {
|
||||
SearchCriteria<SnapshotDataStoreVO> sc = searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEqKVMCheckpointNotNull.create();
|
||||
sc.setParameters(VOLUME_ID, volumeId);
|
||||
sc.setParameters(STATE, State.Ready);
|
||||
return listBy(sc);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<SnapshotDataStoreVO> listExtractedSnapshotsBeforeDate(Date beforeDate) {
|
||||
SearchCriteria<SnapshotDataStoreVO> sc = searchFilterStateAndDownloadUrlNotNullAndDownloadUrlCreatedBefore.create();
|
||||
sc.setParameters(URL_CREATED_BEFORE, beforeDate);
|
||||
sc.setParameters(STATE, State.Ready);
|
||||
|
||||
return listBy(sc);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
|||
|
|
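A minimal usage sketch (illustrative only, not part of this commit) of the new findParent overload: with KVM incremental snapshots enabled, the most recent Ready snapshot that still carries a checkpoint path is looked up, either on a specific store or across the zone's image stores when no store id is given. The snapshotStoreDao, zoneId and volumeId names are assumptions made for this sketch.

    // Hypothetical caller; snapshotStoreDao, zoneId and volumeId are assumed to exist in the enclosing class.
    SnapshotDataStoreVO parent = snapshotStoreDao.findParent(DataStoreRole.Image, null, zoneId, volumeId,
            true, Hypervisor.HypervisorType.KVM);
    if (parent != null && parent.getKvmCheckpointPath() != null) {
        // the next snapshot can be created as a delta on top of the parent's checkpoint
    }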
@@ -29,6 +29,8 @@ import javax.persistence.Table;
|
|||
import javax.persistence.Temporal;
|
||||
import javax.persistence.TemporalType;
|
||||
|
||||
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
|
||||
import org.apache.commons.lang3.BooleanUtils;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
|
||||
|
|
@@ -80,12 +82,18 @@ public class SnapshotDataStoreVO implements StateObject<ObjectInDataStoreStateMa
|
|||
@Column(name = "parent_snapshot_id")
|
||||
private long parentSnapshotId;
|
||||
|
||||
@Column(name = "end_of_chain")
|
||||
private Boolean endOfChain;
|
||||
|
||||
@Column(name = "job_id")
|
||||
private String jobId;
|
||||
|
||||
@Column(name = "install_path")
|
||||
private String installPath;
|
||||
|
||||
@Column(name = "kvm_checkpoint_path")
|
||||
private String kvmCheckpointPath;
|
||||
|
||||
@Column(name = "download_url", length = 2048)
|
||||
private String extractUrl;
|
||||
|
||||
|
|
@@ -226,14 +234,7 @@ public class SnapshotDataStoreVO implements StateObject<ObjectInDataStoreStateMa
|
|||
|
||||
@Override
|
||||
public String toString() {
|
||||
return new StringBuilder("SnapshotDataStore[").append(id)
|
||||
.append("-")
|
||||
.append(snapshotId)
|
||||
.append("-")
|
||||
.append(dataStoreId)
|
||||
.append(installPath)
|
||||
.append("]")
|
||||
.toString();
|
||||
return ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "snapshotId", "dataStoreId", "state", "installPath", "kvmCheckpointPath");
|
||||
}
|
||||
|
||||
public long getUpdatedCount() {
|
||||
|
|
@@ -376,4 +377,20 @@ public class SnapshotDataStoreVO implements StateObject<ObjectInDataStoreStateMa
|
|||
public void setDisplay(boolean display) {
|
||||
this.display = display;
|
||||
}
|
||||
|
||||
public String getKvmCheckpointPath() {
|
||||
return kvmCheckpointPath;
|
||||
}
|
||||
|
||||
public void setKvmCheckpointPath(String kvmCheckpointPath) {
|
||||
this.kvmCheckpointPath = kvmCheckpointPath;
|
||||
}
|
||||
|
||||
public boolean isEndOfChain() {
|
||||
return BooleanUtils.toBoolean(endOfChain);
|
||||
}
|
||||
|
||||
public void setEndOfChain(boolean endOfChain) {
|
||||
this.endOfChain = endOfChain;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -54,6 +54,59 @@ CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.storage_pool', 'used_iops', 'bigint
|
|||
-- Add reason column for op_ha_work
|
||||
CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.op_ha_work', 'reason', 'varchar(32) DEFAULT NULL COMMENT "Reason for the HA work"');
|
||||
|
||||
-- Support for XCP-ng 8.3.0 and XenServer 8.4 by adding hypervisor capabilities
|
||||
-- https://docs.xenserver.com/en-us/xenserver/8/system-requirements/configuration-limits.html
|
||||
-- https://docs.xenserver.com/en-us/citrix-hypervisor/system-requirements/configuration-limits.html
|
||||
INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported) VALUES (UUID(), 'XenServer', '8.3.0', 1000, 254, 64, 1);
|
||||
INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported) VALUES (UUID(), 'XenServer', '8.4.0', 1000, 240, 64, 1);
|
||||
|
||||
-- Add missing and new Guest OS mappings
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 10 (64-bit)', 'XenServer', '8.2.1', 'Debian Buster 10');
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (5, 'SUSE Linux Enterprise Server 15 (64-bit)', 'XenServer', '8.2.1', 'SUSE Linux Enterprise 15 (64-bit)');
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (6, 'Windows Server 2022 (64-bit)', 'XenServer', '8.2.1', 'Windows Server 2022 (64-bit)');
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (6, 'Windows 11 (64-bit)', 'XenServer', '8.2.1', 'Windows 11');
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (10, 'Ubuntu 20.04 LTS', 'XenServer', '8.2.1', 'Ubuntu Focal Fossa 20.04');
|
||||
|
||||
-- Copy XS 8.2.1 hypervisor guest OS mappings to XS 8.3 and 8.3 mappings to 8.4
|
||||
INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'Xenserver', '8.3.0', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='Xenserver' AND hypervisor_version='8.2.1';
|
||||
|
||||
-- Add new and missing guest os mappings for XS 8.3
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Rocky Linux 9', 'XenServer', '8.3.0', 'Rocky Linux 9');
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Rocky Linux 8', 'XenServer', '8.3.0', 'Rocky Linux 8');
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'AlmaLinux 9', 'XenServer', '8.3.0', 'AlmaLinux 9');
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'AlmaLinux 8', 'XenServer', '8.3.0', 'AlmaLinux 8');
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 12 (64-bit)', 'XenServer', '8.3.0', 'Debian Bookworm 12');
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Linux 9', 'XenServer', '8.3.0', 'Oracle Linux 9');
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Linux 8', 'XenServer', '8.3.0', 'Oracle Linux 8');
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (4, 'Red Hat Enterprise Linux 8.0', 'XenServer', '8.3.0', 'Red Hat Enterprise Linux 8');
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (4, 'Red Hat Enterprise Linux 9.0', 'XenServer', '8.3.0', 'Red Hat Enterprise Linux 9');
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (10, 'Ubuntu 22.04 LTS', 'XenServer', '8.3.0', 'Ubuntu Jammy Jellyfish 22.04');
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (5, 'SUSE Linux Enterprise Server 12 SP5 (64-bit)', 'XenServer', '8.3.0', 'SUSE Linux Enterprise Server 12 SP5 (64-bit');
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (4, 'NeoKylin Linux Server 7', 'XenServer', '8.3.0', 'NeoKylin Linux Server 7');
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'CentOS Stream 9', 'XenServer', '8.3.0', 'CentOS Stream 9');
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (4, 'Scientific Linux 7', 'XenServer', '8.3.0', 'Scientific Linux 7');
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (7, 'Generic Linux UEFI', 'XenServer', '8.3.0', 'Generic Linux UEFI');
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (7, 'Generic Linux BIOS', 'XenServer', '8.3.0', 'Generic Linux BIOS');
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Gooroom Platform 2.0', 'XenServer', '8.3.0', 'Gooroom Platform 2.0');
|
||||
|
||||
INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'Xenserver', '8.4.0', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='Xenserver' AND hypervisor_version='8.3.0';
|
||||
|
||||
-- Add new guest os mappings for XS 8.4 and KVM
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (6, 'Windows Server 2025', 'XenServer', '8.4.0', 'Windows Server 2025');
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (10, 'Ubuntu 24.04 LTS', 'XenServer', '8.4.0', 'Ubuntu Noble Numbat 24.04');
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 10 (64-bit)', 'KVM', 'default', 'Debian GNU/Linux 10 (64-bit)');
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 11 (64-bit)', 'KVM', 'default', 'Debian GNU/Linux 11 (64-bit)');
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 12 (64-bit)', 'KVM', 'default', 'Debian GNU/Linux 12 (64-bit)');
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (6, 'Windows 11 (64-bit)', 'KVM', 'default', 'Windows 11');
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (6, 'Windows Server 2025', 'KVM', 'default', 'Windows Server 2025');
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (10, 'Ubuntu 24.04 LTS', 'KVM', 'default', 'Ubuntu 24.04 LTS');
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'CentOS Stream 10 (preview)', 'XenServer', '8.4.0', 'CentOS Stream 10 (preview)');
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'CentOS Stream 9', 'XenServer', '8.4.0', 'CentOS Stream 9');
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (4, 'Scientific Linux 7', 'XenServer', '8.4.0', 'Scientific Linux 7');
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (4, 'NeoKylin Linux Server 7', 'XenServer', '8.4.0', 'NeoKylin Linux Server 7');
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (5, 'SUSE Linux Enterprise Server 12 SP5 (64-bit)', 'XenServer', '8.4.0', 'SUSE Linux Enterprise Server 12 SP5 (64-bit');
|
||||
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Gooroom Platform 2.0', 'XenServer', '8.4.0', 'Gooroom Platform 2.0');
|
||||
|
||||
-- Grant access to 2FA APIs for the "Read-Only User - Default" role
|
||||
|
||||
CALL `cloud`.`IDEMPOTENT_UPDATE_API_PERMISSION`('Read-Only User - Default', 'setupUserTwoFactorAuthentication', 'ALLOW');
|
||||
|
|
@ -75,3 +128,6 @@ CALL `cloud`.`IDEMPOTENT_UPDATE_API_PERMISSION`('Read-Only Admin - Default', 'va
|
|||
|
||||
CALL `cloud`.`IDEMPOTENT_UPDATE_API_PERMISSION`('Support Admin - Default', 'setupUserTwoFactorAuthentication', 'ALLOW');
|
||||
CALL `cloud`.`IDEMPOTENT_UPDATE_API_PERMISSION`('Support Admin - Default', 'validateUserTwoFactorAuthenticationCode', 'ALLOW');
|
||||
|
||||
-- Re-apply VPC: update default network offering for vpc tier to conserve_mode=1 (#8309)
|
||||
UPDATE `cloud`.`network_offerings` SET conserve_mode=1 WHERE name='DefaultIsolatedNetworkOfferingForVpcNetworks';
|
||||
|
|
|
|||
|
|
@ -94,3 +94,8 @@ CREATE TABLE IF NOT EXISTS `cloud`.`reconcile_commands` (
|
|||
INDEX `i_reconcile_command__host_id`(`host_id`),
|
||||
CONSTRAINT `fk_reconcile_command__host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
--- KVM Incremental Snapshots
|
||||
|
||||
CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.snapshot_store_ref', 'kvm_checkpoint_path', 'varchar(255)');
|
||||
CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.snapshot_store_ref', 'end_of_chain', 'int(1) unsigned');
|
||||
|
|
|
|||
|
|
@ -0,0 +1,20 @@
|
|||
-- Licensed to the Apache Software Foundation (ASF) under one
|
||||
-- or more contributor license agreements. See the NOTICE file
|
||||
-- distributed with this work for additional information
|
||||
-- regarding copyright ownership. The ASF licenses this file
|
||||
-- to you under the Apache License, Version 2.0 (the
|
||||
-- "License"); you may not use this file except in compliance
|
||||
-- with the License. You may obtain a copy of the License at
|
||||
--
|
||||
-- http://www.apache.org/licenses/LICENSE-2.0
|
||||
--
|
||||
-- Unless required by applicable law or agreed to in writing,
|
||||
-- software distributed under the License is distributed on an
|
||||
-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
-- KIND, either express or implied. See the License for the
|
||||
-- specific language governing permissions and limitations
|
||||
-- under the License.
|
||||
|
||||
--;
|
||||
-- Schema upgrade from 4.20.1.0 to 4.21.0.0
|
||||
--;
|
||||
|
|
@ -16,31 +16,44 @@
|
|||
// under the License.
|
||||
package com.cloud.capacity.dao;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertSame;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.mockito.Mockito.any;
|
||||
import static org.mockito.Mockito.doReturn;
|
||||
import static org.mockito.Mockito.eq;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.verify;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
||||
import com.cloud.capacity.CapacityVO;
|
||||
import com.cloud.host.Host;
|
||||
import com.cloud.utils.Pair;
|
||||
import com.cloud.utils.Ternary;
|
||||
import com.cloud.utils.db.SearchBuilder;
|
||||
import com.cloud.utils.db.SearchCriteria;
|
||||
import com.cloud.utils.db.TransactionLegacy;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.mockito.InjectMocks;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.MockedStatic;
|
||||
import org.mockito.Mockito;
|
||||
import org.mockito.Spy;
|
||||
import org.mockito.junit.MockitoJUnitRunner;
|
||||
|
||||
import com.cloud.capacity.CapacityVO;
|
||||
import com.cloud.utils.db.SearchBuilder;
|
||||
import com.cloud.utils.db.SearchCriteria;
|
||||
import java.sql.PreparedStatement;
|
||||
import java.sql.ResultSet;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNotNull;
|
||||
import static org.junit.Assert.assertSame;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.mockito.ArgumentMatchers.anyString;
|
||||
import static org.mockito.Mockito.any;
|
||||
import static org.mockito.Mockito.doNothing;
|
||||
import static org.mockito.Mockito.doReturn;
|
||||
import static org.mockito.Mockito.eq;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.times;
|
||||
import static org.mockito.Mockito.verify;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
@RunWith(MockitoJUnitRunner.class)
|
||||
public class CapacityDaoImplTest {
|
||||
|
|
@ -48,6 +61,14 @@ public class CapacityDaoImplTest {
|
|||
@InjectMocks
|
||||
CapacityDaoImpl capacityDao = new CapacityDaoImpl();
|
||||
|
||||
@Mock
|
||||
private TransactionLegacy txn;
|
||||
@Mock
|
||||
private PreparedStatement pstmt;
|
||||
@Mock
|
||||
private ResultSet resultSet;
|
||||
private MockedStatic<TransactionLegacy> mockedTransactionLegacy;
|
||||
|
||||
private SearchBuilder<CapacityVO> searchBuilder;
|
||||
private SearchCriteria<CapacityVO> searchCriteria;
|
||||
|
||||
|
|
@ -59,6 +80,16 @@ public class CapacityDaoImplTest {
|
|||
searchCriteria = mock(SearchCriteria.class);
|
||||
doReturn(searchBuilder).when(capacityDao).createSearchBuilder();
|
||||
when(searchBuilder.create()).thenReturn(searchCriteria);
|
||||
|
||||
mockedTransactionLegacy = Mockito.mockStatic(TransactionLegacy.class);
|
||||
mockedTransactionLegacy.when(TransactionLegacy::currentTxn).thenReturn(txn);
|
||||
}
|
||||
|
||||
@After
|
||||
public void tearDown() {
|
||||
if (mockedTransactionLegacy != null) {
|
||||
mockedTransactionLegacy.close();
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
|
|
@ -96,4 +127,207 @@ public class CapacityDaoImplTest {
|
|||
verify(capacityDao).listBy(searchCriteria);
|
||||
assertTrue(result.isEmpty());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testListClustersCrossingThresholdEmptyResult() throws Exception {
|
||||
when(txn.prepareAutoCloseStatement(anyString())).thenReturn(pstmt);
|
||||
when(pstmt.executeQuery()).thenReturn(resultSet);
|
||||
when(resultSet.next()).thenReturn(false);
|
||||
List<Long> result = capacityDao.listClustersCrossingThreshold((short)1, 1L, "cpu.threshold", 5000L);
|
||||
assertNotNull(result);
|
||||
assertTrue(result.isEmpty());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFindCapacityByZoneAndHostTagNoResults() throws Exception {
|
||||
when(txn.prepareAutoCloseStatement(anyString())).thenReturn(pstmt);
|
||||
when(pstmt.executeQuery()).thenReturn(resultSet);
|
||||
when(resultSet.next()).thenReturn(false);
|
||||
|
||||
Ternary<Long, Long, Long> result = capacityDao.findCapacityByZoneAndHostTag(1L, "host-tag");
|
||||
assertNotNull(result);
|
||||
assertEquals(Long.valueOf(0L), result.first());
|
||||
assertEquals(Long.valueOf(0L), result.second());
|
||||
assertEquals(Long.valueOf(0L), result.third());
|
||||
}
|
||||
@Test
|
||||
public void testFindByHostIdType() {
|
||||
CapacityVO capacity = new CapacityVO();
|
||||
capacity.setHostId(1L);
|
||||
capacity.setCapacityType((short) 1);
|
||||
|
||||
doReturn(capacity).when(capacityDao).findOneBy(any());
|
||||
|
||||
CapacityVO found = capacityDao.findByHostIdType(1L, (short) 1);
|
||||
assertNotNull(found);
|
||||
assertEquals(Long.valueOf(1L), found.getHostOrPoolId());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testUpdateAllocatedAddition() throws Exception {
|
||||
when(txn.prepareAutoCloseStatement(anyString())).thenReturn(pstmt);
|
||||
doNothing().when(txn).start();
|
||||
when(txn.commit()).thenReturn(true);
|
||||
|
||||
capacityDao.updateAllocated(1L, 1000L, (short)1, true);
|
||||
|
||||
verify(txn, times(1)).start();
|
||||
verify(txn, times(1)).commit();
|
||||
verify(pstmt, times(1)).executeUpdate();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testUpdateAllocatedSubtraction() throws Exception {
|
||||
when(txn.prepareAutoCloseStatement(anyString())).thenReturn(pstmt);
|
||||
doNothing().when(txn).start();
|
||||
when(txn.commit()).thenReturn(true);
|
||||
|
||||
capacityDao.updateAllocated(1L, 500L, (short)1, false);
|
||||
|
||||
verify(txn, times(1)).start();
|
||||
verify(txn, times(1)).commit();
|
||||
verify(pstmt, times(1)).executeUpdate();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFindFilteredCapacityByEmptyResult() throws Exception {
|
||||
when(txn.prepareAutoCloseStatement(anyString())).thenReturn(pstmt);
|
||||
when(pstmt.executeQuery()).thenReturn(resultSet);
|
||||
when(resultSet.next()).thenReturn(false);
|
||||
List<CapacityDaoImpl.SummedCapacity> result = capacityDao.findFilteredCapacityBy(null, null, null, null, Collections.emptyList(), Collections.emptyList());
|
||||
assertNotNull(result);
|
||||
assertTrue(result.isEmpty());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testListClustersInZoneOrPodByHostCapacitiesEmpty() throws Exception {
|
||||
when(txn.prepareAutoCloseStatement(anyString())).thenReturn(pstmt);
|
||||
when(pstmt.executeQuery()).thenReturn(resultSet);
|
||||
when(resultSet.next()).thenReturn(false);
|
||||
|
||||
List<Long> resultZone = capacityDao.listClustersInZoneOrPodByHostCapacities(1L, 123L, 2, 2048L, (short)0, true);
|
||||
assertNotNull(resultZone);
|
||||
assertTrue(resultZone.isEmpty());
|
||||
|
||||
List<Long> resultPod = capacityDao.listClustersInZoneOrPodByHostCapacities(1L, 123L, 2, 2048L, (short)0, false);
|
||||
assertNotNull(resultPod);
|
||||
assertTrue(resultPod.isEmpty());
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void testListHostsWithEnoughCapacityEmptyResult() throws Exception {
|
||||
when(txn.prepareAutoCloseStatement(anyString())).thenReturn(pstmt);
|
||||
when(pstmt.executeQuery()).thenReturn(resultSet);
|
||||
when(resultSet.next()).thenReturn(false);
|
||||
|
||||
List<Long> result = capacityDao.listHostsWithEnoughCapacity(1, 100L, 200L, Host.Type.Routing.toString());
|
||||
assertNotNull(result);
|
||||
assertTrue(result.isEmpty());
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void testOrderClustersByAggregateCapacityEmptyResult() throws Exception {
|
||||
when(txn.prepareAutoCloseStatement(anyString())).thenReturn(pstmt);
|
||||
when(pstmt.executeQuery()).thenReturn(resultSet);
|
||||
when(resultSet.next()).thenReturn(false);
|
||||
|
||||
Pair<List<Long>, Map<Long, Double>> result = capacityDao.orderClustersByAggregateCapacity(1L, 1L, (short) 1, true);
|
||||
assertNotNull(result);
|
||||
assertTrue(result.first().isEmpty());
|
||||
assertTrue(result.second().isEmpty());
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void testOrderPodsByAggregateCapacityEmptyResult() throws Exception {
|
||||
when(txn.prepareAutoCloseStatement(anyString())).thenReturn(pstmt);
|
||||
when(pstmt.executeQuery()).thenReturn(resultSet);
|
||||
when(resultSet.next()).thenReturn(false);
|
||||
|
||||
Pair<List<Long>, Map<Long, Double>> result = capacityDao.orderPodsByAggregateCapacity(1L, (short) 1);
|
||||
assertNotNull(result);
|
||||
assertTrue(result.first().isEmpty());
|
||||
assertTrue(result.second().isEmpty());
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void testUpdateCapacityState() throws Exception {
|
||||
when(txn.prepareAutoCloseStatement(anyString())).thenReturn(pstmt);
|
||||
when(pstmt.executeUpdate()).thenReturn(1);
|
||||
|
||||
capacityDao.updateCapacityState(1L, 1L, 1L, 1L, "Enabled", new short[]{1});
|
||||
|
||||
verify(pstmt, times(1)).executeUpdate();
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void testFindClusterConsumption() throws Exception {
|
||||
when(txn.prepareAutoCloseStatement(anyString())).thenReturn(pstmt);
|
||||
when(pstmt.executeQuery()).thenReturn(resultSet);
|
||||
when(resultSet.next()).thenReturn(true);
|
||||
when(resultSet.getFloat(1)).thenReturn(0.5f);
|
||||
|
||||
float result = capacityDao.findClusterConsumption(1L, (short) 1, 1000L);
|
||||
assertEquals(0.5f, result, 0.0f);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testListPodsByHostCapacitiesEmptyResult() throws Exception {
|
||||
when(txn.prepareAutoCloseStatement(anyString())).thenReturn(pstmt);
|
||||
when(pstmt.executeQuery()).thenReturn(resultSet);
|
||||
when(resultSet.next()).thenReturn(false);
|
||||
|
||||
List<Long> result = capacityDao.listPodsByHostCapacities(1L, 2, 1024L, (short)0);
|
||||
assertNotNull(result);
|
||||
assertTrue(result.isEmpty());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testOrderHostsByFreeCapacityEmptyResult() throws Exception {
|
||||
when(txn.prepareAutoCloseStatement(anyString())).thenReturn(pstmt);
|
||||
when(pstmt.executeQuery()).thenReturn(resultSet);
|
||||
when(resultSet.next()).thenReturn(false);
|
||||
|
||||
Pair<List<Long>, Map<Long, Double>> result = capacityDao.orderHostsByFreeCapacity(1L, 1L, (short) 0);
|
||||
assertNotNull(result);
|
||||
assertTrue(result.first().isEmpty());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFindByClusterPodZoneEmptyResult() throws Exception {
|
||||
when(txn.prepareAutoCloseStatement(anyString())).thenReturn(pstmt);
|
||||
when(pstmt.executeQuery()).thenReturn(resultSet);
|
||||
when(resultSet.next()).thenReturn(false);
|
||||
|
||||
List<CapacityDaoImpl.SummedCapacity> result = capacityDao.findByClusterPodZone(1L, 1L, 1L);
|
||||
assertNotNull(result);
|
||||
assertTrue(result.isEmpty());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testListCapacitiesGroupedByLevelAndTypeEmptyResult() throws Exception {
|
||||
when(txn.prepareAutoCloseStatement(anyString())).thenReturn(pstmt);
|
||||
when(pstmt.executeQuery()).thenReturn(resultSet);
|
||||
when(resultSet.next()).thenReturn(false);
|
||||
|
||||
List<CapacityDaoImpl.SummedCapacity> result = capacityDao.listCapacitiesGroupedByLevelAndType(0, 1L,
|
||||
1L, 1L, 0, Collections.emptyList(), Collections.emptyList(), 1L);
|
||||
assertNotNull(result);
|
||||
assertTrue(result.isEmpty());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFindCapacityByEmptyResult() throws Exception {
|
||||
when(txn.prepareAutoCloseStatement(anyString())).thenReturn(pstmt);
|
||||
when(pstmt.executeQuery()).thenReturn(resultSet);
|
||||
when(resultSet.next()).thenReturn(false);
|
||||
|
||||
List<CapacityDaoImpl.SummedCapacity> result = capacityDao.findCapacityBy(1, 1L, 1L, 1L);
|
||||
assertNotNull(result);
|
||||
assertTrue(result.isEmpty());
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -126,7 +126,26 @@ public class ConfigDriveBuilder {
|
|||
|
||||
File openStackFolder = new File(tempDirName + ConfigDrive.openStackConfigDriveName);
|
||||
|
||||
writeVendorEmptyJsonFile(openStackFolder);
|
||||
/*
|
||||
Try to find VM password in the vmData.
|
||||
If it is found, then write it into vendor-data.json
|
||||
*/
|
||||
String vmPassword = "";
|
||||
for (String[] item : vmData) {
|
||||
String dataType = item[CONFIGDATA_DIR];
|
||||
String fileName = item[CONFIGDATA_FILE];
|
||||
String content = item[CONFIGDATA_CONTENT];
|
||||
if (PASSWORD_FILE.equals(fileName)) {
|
||||
vmPassword = content;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (vmPassword.equals("")) {
|
||||
writeVendorDataJsonFile(openStackFolder);
|
||||
} else {
|
||||
writeVendorDataJsonFile(openStackFolder, vmPassword);
|
||||
}
|
||||
|
||||
writeNetworkData(nics, supportedServices, openStackFolder);
|
||||
for (NicProfile nic: nics) {
|
||||
if (supportedServices.get(nic.getId()).contains(Network.Service.UserData)) {
|
||||
|
|
@ -253,7 +272,7 @@ public class ConfigDriveBuilder {
|
|||
*
|
||||
* If the folder does not exist, and we cannot create it, we throw a {@link CloudRuntimeException}.
|
||||
*/
|
||||
static void writeVendorEmptyJsonFile(File openStackFolder) {
|
||||
static void writeVendorDataJsonFile(File openStackFolder) {
|
||||
if (openStackFolder.exists() || openStackFolder.mkdirs()) {
|
||||
writeFile(openStackFolder, "vendor_data.json", "{}");
|
||||
} else {
|
||||
|
|
@ -261,6 +280,26 @@ public class ConfigDriveBuilder {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Writes vendor data containing Cloudstack-generated password into vendor-data.json
|
||||
*
|
||||
* If the folder does not exist, and we cannot create it, we throw a {@link CloudRuntimeException}.
|
||||
*/
|
||||
static void writeVendorDataJsonFile(File openStackFolder, String password) {
|
||||
if (openStackFolder.exists() || openStackFolder.mkdirs()) {
|
||||
writeFile(
|
||||
openStackFolder,
|
||||
"vendor_data.json",
|
||||
String.format(
|
||||
"{\"cloud-init\": \"#cloud-config\\npassword: %s\\nchpasswd:\\n expire: False\"}",
|
||||
password
|
||||
)
|
||||
);
|
||||
} else {
|
||||
throw new CloudRuntimeException("Failed to create folder " + openStackFolder);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates the {@link JsonObject} with VM's metadata. The vmData is a list of arrays; we expect this list to have the following entries:
|
||||
* <ul>
|
||||
|
|
|
|||
|
|
@ -134,7 +134,7 @@ public class ConfigDriveBuilderTest {
|
|||
@Test(expected = CloudRuntimeException.class)
|
||||
public void buildConfigDriveTestIoException() {
|
||||
try (MockedStatic<ConfigDriveBuilder> configDriveBuilderMocked = Mockito.mockStatic(ConfigDriveBuilder.class)) {
|
||||
configDriveBuilderMocked.when(() -> ConfigDriveBuilder.writeVendorEmptyJsonFile(nullable(File.class))).thenThrow(CloudRuntimeException.class);
|
||||
configDriveBuilderMocked.when(() -> ConfigDriveBuilder.writeVendorDataJsonFile(nullable(File.class))).thenThrow(CloudRuntimeException.class);
|
||||
Mockito.when(ConfigDriveBuilder.buildConfigDrive(null, new ArrayList<>(), "teste", "C:", null, supportedServices)).thenCallRealMethod();
|
||||
ConfigDriveBuilder.buildConfigDrive(null, new ArrayList<>(), "teste", "C:", null, supportedServices);
|
||||
}
|
||||
|
|
@ -144,7 +144,7 @@ public class ConfigDriveBuilderTest {
|
|||
public void buildConfigDriveTest() {
|
||||
try (MockedStatic<ConfigDriveBuilder> configDriveBuilderMocked = Mockito.mockStatic(ConfigDriveBuilder.class)) {
|
||||
|
||||
configDriveBuilderMocked.when(() -> ConfigDriveBuilder.writeVendorEmptyJsonFile(Mockito.any(File.class))).then(invocationOnMock -> null);
|
||||
configDriveBuilderMocked.when(() -> ConfigDriveBuilder.writeVendorDataJsonFile(Mockito.any(File.class))).then(invocationOnMock -> null);
|
||||
|
||||
configDriveBuilderMocked.when(() -> ConfigDriveBuilder.writeVmMetadata(Mockito.anyList(), Mockito.anyString(), Mockito.any(File.class), anyMap())).then(invocationOnMock -> null);
|
||||
|
||||
|
|
@ -163,7 +163,7 @@ public class ConfigDriveBuilderTest {
|
|||
Assert.assertEquals("mockIsoDataBase64", returnedIsoData);
|
||||
|
||||
configDriveBuilderMocked.verify(() -> {
|
||||
ConfigDriveBuilder.writeVendorEmptyJsonFile(Mockito.any(File.class));
|
||||
ConfigDriveBuilder.writeVendorDataJsonFile(Mockito.any(File.class));
|
||||
ConfigDriveBuilder.writeVmMetadata(Mockito.anyList(), Mockito.anyString(), Mockito.any(File.class), anyMap());
|
||||
ConfigDriveBuilder.linkUserData(Mockito.anyString());
|
||||
ConfigDriveBuilder.generateAndRetrieveIsoAsBase64Iso(Mockito.anyString(), Mockito.anyString(), Mockito.anyString());
|
||||
|
|
@ -172,23 +172,23 @@ public class ConfigDriveBuilderTest {
|
|||
}
|
||||
|
||||
@Test(expected = CloudRuntimeException.class)
|
||||
public void writeVendorEmptyJsonFileTestCannotCreateOpenStackFolder() {
|
||||
public void writeVendorDataJsonFileTestCannotCreateOpenStackFolder() {
|
||||
File folderFileMock = Mockito.mock(File.class);
|
||||
Mockito.doReturn(false).when(folderFileMock).mkdirs();
|
||||
|
||||
ConfigDriveBuilder.writeVendorEmptyJsonFile(folderFileMock);
|
||||
ConfigDriveBuilder.writeVendorDataJsonFile(folderFileMock);
|
||||
}
|
||||
|
||||
@Test(expected = CloudRuntimeException.class)
|
||||
public void writeVendorEmptyJsonFileTest() {
|
||||
public void writeVendorDataJsonFileTest() {
|
||||
File folderFileMock = Mockito.mock(File.class);
|
||||
Mockito.doReturn(false).when(folderFileMock).mkdirs();
|
||||
|
||||
ConfigDriveBuilder.writeVendorEmptyJsonFile(folderFileMock);
|
||||
ConfigDriveBuilder.writeVendorDataJsonFile(folderFileMock);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void writeVendorEmptyJsonFileTestCreatingFolder() {
|
||||
public void writeVendorDataJsonFileTestCreatingFolder() {
|
||||
try (MockedStatic<ConfigDriveBuilder> configDriveBuilderMocked = Mockito.mockStatic(ConfigDriveBuilder.class)) {
|
||||
|
||||
File folderFileMock = Mockito.mock(File.class);
|
||||
|
|
@ -196,9 +196,9 @@ public class ConfigDriveBuilderTest {
|
|||
Mockito.doReturn(true).when(folderFileMock).mkdirs();
|
||||
|
||||
//force execution of real method
|
||||
configDriveBuilderMocked.when(() -> ConfigDriveBuilder.writeVendorEmptyJsonFile(folderFileMock)).thenCallRealMethod();
|
||||
configDriveBuilderMocked.when(() -> ConfigDriveBuilder.writeVendorDataJsonFile(folderFileMock)).thenCallRealMethod();
|
||||
|
||||
ConfigDriveBuilder.writeVendorEmptyJsonFile(folderFileMock);
|
||||
ConfigDriveBuilder.writeVendorDataJsonFile(folderFileMock);
|
||||
|
||||
Mockito.verify(folderFileMock).exists();
|
||||
Mockito.verify(folderFileMock).mkdirs();
|
||||
|
|
|
|||
|
|
@ -23,6 +23,7 @@ import java.util.Objects;
|
|||
|
||||
import javax.inject.Inject;
|
||||
|
||||
import com.cloud.utils.db.TransactionCallback;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event;
|
||||
|
|
@ -68,7 +69,6 @@ import com.cloud.storage.dao.SnapshotZoneDao;
|
|||
import com.cloud.storage.dao.VolumeDao;
|
||||
import com.cloud.storage.dao.VolumeDetailsDao;
|
||||
import com.cloud.storage.snapshot.SnapshotManager;
|
||||
import com.cloud.utils.NumbersUtil;
|
||||
import com.cloud.utils.db.DB;
|
||||
import com.cloud.utils.db.Transaction;
|
||||
import com.cloud.utils.db.TransactionCallbackNoReturn;
|
||||
|
|
@ -100,7 +100,7 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase {
|
|||
@Inject
|
||||
SnapshotZoneDao snapshotZoneDao;
|
||||
|
||||
private final List<Snapshot.State> snapshotStatesAbleToDeleteSnapshot = Arrays.asList(Snapshot.State.Destroying, Snapshot.State.Destroyed, Snapshot.State.Error);
|
||||
private final List<Snapshot.State> snapshotStatesAbleToDeleteSnapshot = Arrays.asList(Snapshot.State.Destroying, Snapshot.State.Destroyed, Snapshot.State.Error, Snapshot.State.Hidden);
|
||||
|
||||
public SnapshotDataStoreVO getSnapshotImageStoreRef(long snapshotId, long zoneId) {
|
||||
List<SnapshotDataStoreVO> snaps = snapshotStoreDao.listReadyBySnapshot(snapshotId, DataStoreRole.Image);
|
||||
|
|
@ -161,13 +161,11 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase {
|
|||
VolumeVO volume = volumeDao.findById(snapshot.getVolumeId());
|
||||
if (oldestSnapshotOnPrimary != null) {
|
||||
if (oldestSnapshotOnPrimary.getDataStoreId() == volume.getPoolId() && oldestSnapshotOnPrimary.getId() != parentSnapshotOnPrimaryStore.getId()) {
|
||||
int _deltaSnapshotMax = NumbersUtil.parseInt(configDao.getValue("snapshot.delta.max"),
|
||||
SnapshotManager.DELTAMAX);
|
||||
int deltaSnap = _deltaSnapshotMax;
|
||||
int deltaSnap = SnapshotManager.snapshotDeltaMax.value();
|
||||
int i;
|
||||
|
||||
for (i = 1; i < deltaSnap; i++) {
|
||||
Long prevBackupId = parentSnapshotOnBackupStore.getParentSnapshotId();
|
||||
long prevBackupId = parentSnapshotOnBackupStore.getParentSnapshotId();
|
||||
if (prevBackupId == 0) {
|
||||
break;
|
||||
}
|
||||
|
|
@ -177,11 +175,7 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase {
|
|||
}
|
||||
}
|
||||
|
||||
if (i >= deltaSnap) {
|
||||
fullBackup = true;
|
||||
} else {
|
||||
fullBackup = false;
|
||||
}
|
||||
fullBackup = i >= deltaSnap;
|
||||
} else if (oldestSnapshotOnPrimary.getId() != parentSnapshotOnPrimaryStore.getId()){
|
||||
// if there is an snapshot entry for previousPool(primary storage) of migrated volume, delete it because CS created one more snapshot entry for current pool
|
||||
snapshotStoreDao.remove(oldestSnapshotOnPrimary.getId());
|
||||
|
|
@ -204,7 +198,10 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase {
|
|||
SnapshotInfo child = snapshot.getChild();
|
||||
|
||||
if (child != null) {
|
||||
logger.debug(String.format("Snapshot [%s] has child [%s], not deleting it on the storage [%s]", snapshotTo, child.getTO(), storageToString));
|
||||
logger.debug(String.format("Snapshot [%s] has child [%s], not deleting it on the storage [%s], will only set it as hidden.", snapshotTo, child.getTO(), storageToString));
|
||||
SnapshotDataStoreVO snapshotDataStoreVo = snapshotStoreDao.findByStoreSnapshot(snapshot.getDataStore().getRole(), snapshot.getDataStore().getId(), snapshot.getSnapshotId());
|
||||
snapshotDataStoreVo.setState(State.Hidden);
|
||||
snapshotStoreDao.update(snapshotDataStoreVo.getId(), snapshotDataStoreVo);
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
@ -213,6 +210,7 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase {
|
|||
SnapshotInfo parent = snapshot.getParent();
|
||||
boolean deleted = false;
|
||||
if (parent != null) {
|
||||
logger.debug("Snapshot [{}] has parent [{}].", snapshot, parent);
|
||||
if (parent.getPath() != null && parent.getPath().equalsIgnoreCase(snapshot.getPath())) {
|
||||
//NOTE: if both snapshots share the same path, it's for xenserver's empty delta snapshot. We can't delete the snapshot on the backend, as parent snapshot still reference to it
|
||||
//Instead, mark it as destroyed in the db.
|
||||
|
|
@ -226,6 +224,7 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase {
|
|||
}
|
||||
|
||||
if (!deleted) {
|
||||
logger.debug("Deleting snapshot [{}].", snapshot);
|
||||
try {
|
||||
boolean r = snapshotSvr.deleteSnapshot(snapshot);
|
||||
if (r) {
|
||||
|
|
@ -242,6 +241,7 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase {
|
|||
}
|
||||
} catch (Exception e) {
|
||||
logger.error(String.format("Failed to delete snapshot [%s] on storage [%s] due to [%s].", snapshotTo, storageToString, e.getMessage()), e);
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -249,10 +249,26 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase {
|
|||
} while (snapshot != null && snapshotStatesAbleToDeleteSnapshot.contains(snapshot.getState()));
|
||||
} catch (Exception e) {
|
||||
logger.error(String.format("Failed to delete snapshot [%s] on storage [%s] due to [%s].", snapshotTo, storageToString, e.getMessage()), e);
|
||||
throw new CloudRuntimeException("Failed to delete snapshot chain.");
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
private Long getRootSnapshotId(SnapshotVO snapshotVO) {
|
||||
List<SnapshotDataStoreVO> snapshotDataStoreVOList = snapshotStoreDao.findBySnapshotId(snapshotVO.getSnapshotId());
|
||||
|
||||
long parentId = snapshotDataStoreVOList.stream().
|
||||
map(SnapshotDataStoreVO::getParentSnapshotId).
|
||||
filter(parentSnapshotId -> parentSnapshotId != 0).findFirst().orElse(0L);
|
||||
while (parentId != 0) {
|
||||
snapshotDataStoreVOList = snapshotStoreDao.findBySnapshotId(parentId);
|
||||
parentId = snapshotDataStoreVOList.stream().
|
||||
map(SnapshotDataStoreVO::getParentSnapshotId).
|
||||
filter(parentSnapshotId -> parentSnapshotId != 0).findFirst().orElse(0L);
|
||||
}
|
||||
return snapshotDataStoreVOList.stream().map(SnapshotDataStoreVO::getSnapshotId).findFirst().orElse(0L);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean deleteSnapshot(Long snapshotId, Long zoneId) {
|
||||
SnapshotVO snapshotVO = snapshotDao.findById(snapshotId);
|
||||
|
|
@ -316,6 +332,9 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase {
|
|||
} else {
|
||||
snapshotZoneDao.removeSnapshotFromZones(snapshotVo.getId());
|
||||
}
|
||||
|
||||
updateEndOfChainIfNeeded(snapshotVo);
|
||||
|
||||
if (CollectionUtils.isNotEmpty(retrieveSnapshotEntries(snapshotVo.getId(), null))) {
|
||||
return true;
|
||||
}
|
||||
|
|
@ -323,6 +342,37 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase {
|
|||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* If using the KVM hypervisor and the snapshot was the end of a chain, will mark their parents as end of chain.
|
||||
* */
|
||||
protected void updateEndOfChainIfNeeded(SnapshotVO snapshotVo) {
|
||||
if (!HypervisorType.KVM.equals(snapshotVo.getHypervisorType())) {
|
||||
return;
|
||||
}
|
||||
|
||||
SnapshotDataStoreVO snapshotDataStoreVo = snapshotStoreDao.findBySnapshotIdAndDataStoreRoleAndState(snapshotVo.getSnapshotId(), DataStoreRole.Image, State.Destroyed);
|
||||
|
||||
if (snapshotDataStoreVo == null) {
|
||||
snapshotDataStoreVo = snapshotStoreDao.findBySnapshotIdAndDataStoreRoleAndState(snapshotVo.getSnapshotId(), DataStoreRole.Primary, State.Destroyed);
|
||||
}
|
||||
|
||||
// Snapshot is hidden, no need to update endOfChain
|
||||
if (snapshotDataStoreVo == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (!snapshotDataStoreVo.isEndOfChain() || snapshotDataStoreVo.getParentSnapshotId() <= 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
List<SnapshotDataStoreVO> parentSnapshotDataStoreVoList = findLastAliveAncestors(snapshotDataStoreVo.getParentSnapshotId());
|
||||
|
||||
for (SnapshotDataStoreVO parentSnapshotDatastoreVo : parentSnapshotDataStoreVoList) {
|
||||
parentSnapshotDatastoreVo.setEndOfChain(true);
|
||||
snapshotStoreDao.update(parentSnapshotDatastoreVo.getId(), parentSnapshotDatastoreVo);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates the snapshot to {@link Snapshot.State#Destroyed}.
|
||||
*/
|
||||
|
|
@ -331,17 +381,34 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase {
|
|||
snapshotDao.update(snapshotVo.getId(), snapshotVo);
|
||||
}
|
||||
|
||||
protected boolean deleteSnapshotInfos(SnapshotVO snapshotVo, Long zoneId) {
|
||||
List<SnapshotInfo> snapshotInfos = retrieveSnapshotEntries(snapshotVo.getId(), zoneId);
|
||||
|
||||
boolean result = false;
|
||||
for (var snapshotInfo : snapshotInfos) {
|
||||
if (BooleanUtils.toBooleanDefaultIfNull(deleteSnapshotInfo(snapshotInfo, snapshotVo), false)) {
|
||||
result = true;
|
||||
}
|
||||
protected List<SnapshotDataStoreVO> findLastAliveAncestors(long snapshotId) {
|
||||
List<SnapshotDataStoreVO> parentSnapshotDataStoreVoList = snapshotStoreDao.listBySnapshotId(snapshotId);
|
||||
if (CollectionUtils.isEmpty(parentSnapshotDataStoreVoList)) {
|
||||
return parentSnapshotDataStoreVoList;
|
||||
}
|
||||
if (parentSnapshotDataStoreVoList.stream().anyMatch(snapshotDataStoreVO -> State.Ready.equals(snapshotDataStoreVO.getState()))) {
|
||||
return parentSnapshotDataStoreVoList;
|
||||
}
|
||||
return findLastAliveAncestors(parentSnapshotDataStoreVoList.get(0).getParentSnapshotId());
|
||||
}
|
||||
|
||||
return result;
|
||||
protected boolean deleteSnapshotInfos(SnapshotVO snapshotVo, Long zoneId) {
|
||||
return Transaction.execute((TransactionCallback<Boolean>) status -> {
|
||||
long rootSnapshotId = getRootSnapshotId(snapshotVo);
|
||||
snapshotDao.acquireInLockTable(rootSnapshotId);
|
||||
|
||||
List<SnapshotInfo> snapshotInfos = retrieveSnapshotEntries(snapshotVo.getId(), zoneId);
|
||||
logger.debug("Found {} snapshot references to delete.", snapshotInfos);
|
||||
|
||||
boolean result = false;
|
||||
for (var snapshotInfo : snapshotInfos) {
|
||||
if (BooleanUtils.toBooleanDefaultIfNull(deleteSnapshotInfo(snapshotInfo, snapshotVo), false)) {
|
||||
result = true;
|
||||
}
|
||||
}
|
||||
snapshotDao.releaseFromLockTable(rootSnapshotId);
|
||||
return result;
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -351,59 +418,31 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase {
|
|||
protected Boolean deleteSnapshotInfo(SnapshotInfo snapshotInfo, SnapshotVO snapshotVo) {
|
||||
DataStore dataStore = snapshotInfo.getDataStore();
|
||||
String storageToString = String.format("%s {uuid: \"%s\", name: \"%s\"}", dataStore.getRole().name(), dataStore.getUuid(), dataStore.getName());
|
||||
List<SnapshotDataStoreVO> snapshotStoreRefs = snapshotStoreDao.findBySnapshotId(snapshotVo.getId());
|
||||
List<SnapshotDataStoreVO> snapshotStoreRefs = snapshotStoreDao.findBySnapshotIdAndNotInDestroyedHiddenState(snapshotVo.getId());
|
||||
boolean isLastSnapshotRef = CollectionUtils.isEmpty(snapshotStoreRefs) || snapshotStoreRefs.size() == 1;
|
||||
try {
|
||||
SnapshotObject snapshotObject = castSnapshotInfoToSnapshotObject(snapshotInfo);
|
||||
if (isLastSnapshotRef) {
|
||||
snapshotObject.processEvent(Snapshot.Event.DestroyRequested);
|
||||
}
|
||||
if (!DataStoreRole.Primary.equals(dataStore.getRole())) {
|
||||
verifyIfTheSnapshotIsBeingUsedByAnyVolume(snapshotObject);
|
||||
if (deleteSnapshotChain(snapshotInfo, storageToString)) {
|
||||
logger.debug(String.format("%s was deleted on %s. We will mark the snapshot as destroyed.", snapshotVo, storageToString));
|
||||
} else {
|
||||
logger.debug(String.format("%s was not deleted on %s; however, we will mark the snapshot as destroyed for future garbage collecting.", snapshotVo,
|
||||
storageToString));
|
||||
}
|
||||
snapshotStoreDao.updateDisplayForSnapshotStoreRole(snapshotVo.getId(), dataStore.getId(), dataStore.getRole(), false);
|
||||
if (isLastSnapshotRef) {
|
||||
snapshotObject.processEvent(Snapshot.Event.OperationSucceeded);
|
||||
}
|
||||
return true;
|
||||
} else if (deleteSnapshotInPrimaryStorage(snapshotInfo, snapshotVo, storageToString, snapshotObject, isLastSnapshotRef)) {
|
||||
snapshotStoreDao.updateDisplayForSnapshotStoreRole(snapshotVo.getId(), dataStore.getId(), dataStore.getRole(), false);
|
||||
return true;
|
||||
verifyIfTheSnapshotIsBeingUsedByAnyVolume(snapshotObject);
|
||||
if (deleteSnapshotChain(snapshotInfo, storageToString)) {
|
||||
logger.debug(String.format("%s was deleted on %s. We will mark the snapshot as destroyed.", snapshotVo, storageToString));
|
||||
} else {
|
||||
logger.debug(String.format("%s was not deleted on %s; however, we will mark the snapshot as hidden for future garbage collecting.", snapshotVo,
|
||||
storageToString));
|
||||
}
|
||||
logger.debug(String.format("Failed to delete %s on %s.", snapshotVo, storageToString));
|
||||
snapshotStoreDao.updateDisplayForSnapshotStoreRole(snapshotVo.getId(), dataStore.getId(), dataStore.getRole(), false);
|
||||
if (isLastSnapshotRef) {
|
||||
snapshotObject.processEvent(Snapshot.Event.OperationFailed);
|
||||
snapshotObject.processEvent(Snapshot.Event.OperationSucceeded);
|
||||
}
|
||||
return true;
|
||||
} catch (NoTransitionException ex) {
|
||||
logger.warn(String.format("Failed to delete %s on %s due to %s.", snapshotVo, storageToString, ex.getMessage()), ex);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
protected boolean deleteSnapshotInPrimaryStorage(SnapshotInfo snapshotInfo, SnapshotVO snapshotVo,
|
||||
String storageToString, SnapshotObject snapshotObject, boolean isLastSnapshotRef) throws NoTransitionException {
|
||||
try {
|
||||
if (snapshotSvr.deleteSnapshot(snapshotInfo)) {
|
||||
String msg = String.format("%s was deleted on %s.", snapshotVo, storageToString);
|
||||
if (isLastSnapshotRef) {
|
||||
msg = String.format("%s We will mark the snapshot as destroyed.", msg);
|
||||
snapshotObject.processEvent(Snapshot.Event.OperationSucceeded);
|
||||
}
|
||||
logger.debug(msg);
|
||||
return true;
|
||||
}
|
||||
} catch (CloudRuntimeException ex) {
|
||||
logger.warn(String.format("Unable do delete snapshot %s on %s due to [%s]. The reference will be marked as 'Destroying' for future garbage collecting.",
|
||||
snapshotVo, storageToString, ex.getMessage()), ex);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
protected void verifyIfTheSnapshotIsBeingUsedByAnyVolume(SnapshotObject snapshotObject) throws NoTransitionException {
|
||||
List<VolumeDetailVO> volumesFromSnapshot = _volumeDetailsDaoImpl.findDetails("SNAPSHOT_ID", String.valueOf(snapshotObject.getSnapshotId()), null);
|
||||
if (CollectionUtils.isEmpty(volumesFromSnapshot)) {
|
||||
|
|
|
|||
|
|
@ -41,7 +41,6 @@ import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager;
|
|||
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
|
||||
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
|
||||
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
|
||||
|
|
@ -65,8 +64,11 @@ public class SnapshotObject implements SnapshotInfo {
|
|||
protected Logger logger = LogManager.getLogger(getClass());
|
||||
private SnapshotVO snapshot;
|
||||
private DataStore store;
|
||||
private DataStore imageStore;
|
||||
private Object payload;
|
||||
private Boolean fullBackup;
|
||||
private String checkpointPath;
|
||||
private boolean kvmIncrementalSnapshot = false;
|
||||
private String url;
|
||||
@Inject
|
||||
protected SnapshotDao snapshotDao;
|
||||
|
|
@ -111,12 +113,14 @@ public class SnapshotObject implements SnapshotInfo {
|
|||
|
||||
@Override
|
||||
public SnapshotInfo getParent() {
|
||||
|
||||
logger.trace("Searching for parents of snapshot [{}], in store [{}] with role [{}].", snapshot.getSnapshotId(), store.getId(), store.getRole());
|
||||
SnapshotDataStoreVO snapStoreVO = snapshotStoreDao.findByStoreSnapshot(store.getRole(), store.getId(), snapshot.getId());
|
||||
Long parentId = null;
|
||||
if (snapStoreVO != null) {
|
||||
parentId = snapStoreVO.getParentSnapshotId();
|
||||
if (parentId != null && parentId != 0) {
|
||||
long parentId = snapStoreVO.getParentSnapshotId();
|
||||
if (parentId != 0) {
|
||||
if (HypervisorType.KVM.equals(snapshot.getHypervisorType())) {
|
||||
return getCorrectIncrementalParent(parentId);
|
||||
}
|
||||
return snapshotFactory.getSnapshot(parentId, store);
|
||||
}
|
||||
}
|
||||
|
|
@ -124,6 +128,30 @@ public class SnapshotObject implements SnapshotInfo {
|
|||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the snapshotInfo of the passed snapshot parentId. Will search for the snapshot reference which has a checkpoint path. If none is found, throws an exception.
|
||||
* */
|
||||
protected SnapshotInfo getCorrectIncrementalParent(long parentId) {
|
||||
List<SnapshotDataStoreVO> parentSnapshotDatastoreVos = snapshotStoreDao.findBySnapshotId(parentId);
|
||||
|
||||
if (parentSnapshotDatastoreVos.isEmpty()) {
|
||||
return null;
|
||||
}
|
||||
|
||||
logger.debug("Found parent snapshot references {}, will filter to just one.", parentSnapshotDatastoreVos);
|
||||
|
||||
SnapshotDataStoreVO parent = parentSnapshotDatastoreVos.stream().filter(snapshotDataStoreVO -> snapshotDataStoreVO.getKvmCheckpointPath() != null)
|
||||
.findFirst().
|
||||
orElseThrow(() -> new CloudRuntimeException(String.format("Could not find snapshot parent with id [%s]. None of the records have a checkpoint path.", parentId)));
|
||||
|
||||
SnapshotInfo snapshotInfo = snapshotFactory.getSnapshot(parentId, parent.getDataStoreId(), parent.getRole());
|
||||
snapshotInfo.setKvmIncrementalSnapshot(parent.getKvmCheckpointPath() != null);
|
||||
|
||||
logger.debug("Filtered snapshot references {} to just {}.", parentSnapshotDatastoreVos, parent);
|
||||
|
||||
return snapshotInfo;
|
||||
}
|
||||
|
||||
@Override
|
||||
public SnapshotInfo getChild() {
|
||||
QueryBuilder<SnapshotDataStoreVO> sc = QueryBuilder.create(SnapshotDataStoreVO.class);
|
||||
|
|
@ -216,6 +244,16 @@ public class SnapshotObject implements SnapshotInfo {
|
|||
return store;
|
||||
}
|
||||
|
||||
@Override
|
||||
public DataStore getImageStore() {
|
||||
return imageStore;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setImageStore(DataStore imageStore) {
|
||||
this.imageStore = imageStore;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Long getSize() {
|
||||
return snapshot.getSize();
|
||||
|
|
@ -453,6 +491,26 @@ public class SnapshotObject implements SnapshotInfo {
|
|||
return fullBackup;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getCheckpointPath() {
|
||||
return checkpointPath;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setCheckpointPath(String checkpointPath) {
|
||||
this.checkpointPath = checkpointPath;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setKvmIncrementalSnapshot(boolean isKvmIncrementalSnapshot) {
|
||||
this.kvmIncrementalSnapshot = isKvmIncrementalSnapshot;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isKvmIncrementalSnapshot() {
|
||||
return kvmIncrementalSnapshot;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean delete() {
|
||||
if (store != null) {
|
||||
|
|
@ -468,8 +526,7 @@ public class SnapshotObject implements SnapshotInfo {
|
|||
|
||||
@Override
|
||||
public String toString() {
|
||||
return String.format("SnapshotObject %s",
|
||||
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
|
||||
this, "snapshot", "store"));
|
||||
return String.format("%s, dataStoreId %s, imageStore id %s, checkpointPath %s.", snapshot, store != null? store.getId() : 0,
|
||||
imageStore != null ? imageStore.getId() : 0, checkpointPath);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -22,6 +22,14 @@ import java.util.concurrent.ExecutionException;
|
|||
|
||||
import javax.inject.Inject;
|
||||
|
||||
import com.cloud.agent.api.ConvertSnapshotAnswer;
|
||||
import com.cloud.agent.api.ConvertSnapshotCommand;
|
||||
import com.cloud.agent.api.RemoveBitmapCommand;
|
||||
import com.cloud.host.dao.HostDao;
|
||||
import com.cloud.hypervisor.Hypervisor;
|
||||
import com.cloud.storage.Volume;
|
||||
import com.cloud.storage.snapshot.SnapshotManager;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionService;
|
||||
|
|
@ -38,6 +46,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory;
|
|||
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotResult;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.StorageAction;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.StorageCacheManager;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
|
||||
|
|
@ -50,6 +59,7 @@ import org.apache.cloudstack.framework.jobs.AsyncJob;
|
|||
import org.apache.cloudstack.secstorage.heuristics.HeuristicType;
|
||||
import org.apache.cloudstack.storage.command.CommandResult;
|
||||
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
|
||||
import org.apache.cloudstack.storage.command.CreateObjectAnswer;
|
||||
import org.apache.cloudstack.storage.command.QuerySnapshotZoneCopyAnswer;
|
||||
import org.apache.cloudstack.storage.command.QuerySnapshotZoneCopyCommand;
|
||||
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
|
||||
|
|
@ -57,6 +67,8 @@ import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
|
|||
import org.apache.cloudstack.storage.heuristics.HeuristicRuleHelper;
|
||||
import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity;
|
||||
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
|
||||
import org.apache.commons.lang3.BooleanUtils;
|
||||
import org.apache.commons.lang3.math.NumberUtils;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
|
||||
|
|
@ -101,6 +113,8 @@ public class SnapshotServiceImpl implements SnapshotService {
|
|||
EndPointSelector epSelector;
|
||||
@Inject
|
||||
ConfigurationDao _configDao;
|
||||
@Inject
|
||||
HostDao hostDao;
|
||||
|
||||
@Inject
|
||||
private HeuristicRuleHelper heuristicRuleHelper;
|
||||
|
|
@ -232,27 +246,27 @@ public class SnapshotServiceImpl implements SnapshotService {
|
|||
public SnapshotResult takeSnapshot(SnapshotInfo snap) {
|
||||
SnapshotObject snapshot = (SnapshotObject)snap;
|
||||
|
||||
SnapshotObject snapshotOnPrimary = null;
|
||||
SnapshotObject snapshotOnPrimaryStorage = null;
|
||||
try {
|
||||
snapshotOnPrimary = (SnapshotObject)snap.getDataStore().create(snapshot);
|
||||
snapshotOnPrimaryStorage = (SnapshotObject)snap.getDataStore().create(snapshot);
|
||||
} catch (Exception e) {
|
||||
logger.debug("Failed to create snapshot state on data store due to " + e.getMessage());
|
||||
throw new CloudRuntimeException(e);
|
||||
}
|
||||
|
||||
try {
|
||||
snapshotOnPrimary.processEvent(Snapshot.Event.CreateRequested);
|
||||
snapshotOnPrimaryStorage.processEvent(Snapshot.Event.CreateRequested);
|
||||
} catch (NoTransitionException e) {
|
||||
logger.debug("Failed to change snapshot state: " + e.toString());
|
||||
throw new CloudRuntimeException(e);
|
||||
}
|
||||
|
||||
try {
|
||||
snapshotOnPrimary.processEvent(Event.CreateOnlyRequested);
|
||||
snapshotOnPrimaryStorage.processEvent(Event.CreateOnlyRequested);
|
||||
} catch (Exception e) {
|
||||
logger.debug("Failed to change snapshot state: " + e.toString());
|
||||
try {
|
||||
snapshotOnPrimary.processEvent(Snapshot.Event.OperationFailed);
|
||||
snapshotOnPrimaryStorage.processEvent(Snapshot.Event.OperationFailed);
|
||||
} catch (NoTransitionException e1) {
|
||||
logger.debug("Failed to change snapshot state: " + e1.toString());
|
||||
}
|
||||
|
|
@ -261,10 +275,10 @@ public class SnapshotServiceImpl implements SnapshotService {
|
|||
|
||||
AsyncCallFuture<SnapshotResult> future = new AsyncCallFuture<SnapshotResult>();
|
||||
try {
|
||||
CreateSnapshotContext<CommandResult> context = new CreateSnapshotContext<CommandResult>(null, snap.getBaseVolume(), snapshotOnPrimary, future);
|
||||
CreateSnapshotContext<CommandResult> context = new CreateSnapshotContext<CommandResult>(null, snap.getBaseVolume(), snapshotOnPrimaryStorage, future);
|
||||
AsyncCallbackDispatcher<SnapshotServiceImpl, CreateCmdResult> caller = AsyncCallbackDispatcher.create(this);
|
||||
caller.setCallback(caller.getTarget().createSnapshotAsyncCallback(null, null)).setContext(context);
|
||||
PrimaryDataStoreDriver primaryStore = (PrimaryDataStoreDriver)snapshotOnPrimary.getDataStore().getDriver();
|
||||
PrimaryDataStoreDriver primaryStore = (PrimaryDataStoreDriver)snapshotOnPrimaryStorage.getDataStore().getDriver();
|
||||
primaryStore.takeSnapshot(snapshot, caller);
|
||||
} catch (Exception e) {
|
||||
logger.debug("Failed to take snapshot: {}", snapshot, e);
|
||||
|
|
@ -281,22 +295,56 @@ public class SnapshotServiceImpl implements SnapshotService {
|
|||
|
||||
try {
|
||||
result = future.get();
|
||||
|
||||
updateSnapSizeAndCheckpointPathIfPossible(result, snap);
|
||||
|
||||
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_SNAPSHOT_ON_PRIMARY, snap.getAccountId(), snap.getDataCenterId(), snap.getId(),
|
||||
snap.getName(), null, null, snapshotOnPrimary.getSize(), snapshotOnPrimary.getSize(), snap.getClass().getName(), snap.getUuid());
|
||||
snap.getName(), null, null, snapshotOnPrimaryStorage.getSize(), snapshotOnPrimaryStorage.getSize(), snap.getClass().getName(), snap.getUuid());
|
||||
return result;
|
||||
} catch (InterruptedException e) {
|
||||
logger.debug("Failed to create snapshot", e);
|
||||
throw new CloudRuntimeException("Failed to create snapshot", e);
|
||||
} catch (ExecutionException e) {
|
||||
logger.debug("Failed to create snapshot", e);
|
||||
throw new CloudRuntimeException("Failed to create snapshot", e);
|
||||
} catch (InterruptedException | ExecutionException e) {
|
||||
String message = String.format("Failed to create snapshot [%s] due to [%s].", snapshot, e.getMessage());
|
||||
logger.error(message, e);
|
||||
throw new CloudRuntimeException(message, e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates the snapshot physical size if the answer is an instance of CreateObjectAnswer and the returned physical size if bigger than 0.
|
||||
* Also updates the checkpoint path if possible.
|
||||
* */
|
||||
protected void updateSnapSizeAndCheckpointPathIfPossible(SnapshotResult result, SnapshotInfo snapshotInfo) {
|
||||
SnapshotDataStoreVO snapshotStore;
|
||||
Answer answer = result.getAnswer();
|
||||
|
||||
if (!answer.getResult() || !(answer instanceof CreateObjectAnswer)) {
|
||||
return;
|
||||
}
|
||||
|
||||
SnapshotInfo resultSnapshot = result.getSnapshot();
|
||||
if (snapshotInfo.getImageStore() != null) {
|
||||
snapshotInfo.getImageStore().create(resultSnapshot);
|
||||
snapshotStore = _snapshotStoreDao.findBySnapshotIdAndDataStoreRoleAndState(resultSnapshot.getSnapshotId(), DataStoreRole.Image, ObjectInDataStoreStateMachine.State.Allocated);
|
||||
} else {
|
||||
snapshotStore = _snapshotStoreDao.findByStoreSnapshot(DataStoreRole.Primary, resultSnapshot.getDataStore().getId(), resultSnapshot.getSnapshotId());
|
||||
}
|
||||
|
||||
SnapshotObjectTO snapshotObjectTo = (SnapshotObjectTO) ((CreateObjectAnswer) answer).getData();
|
||||
|
||||
Long physicalSize = snapshotObjectTo.getPhysicalSize();
|
||||
if (NumberUtils.compare(physicalSize, 0L) > 0) {
|
||||
snapshotStore.setPhysicalSize(physicalSize);
|
||||
}
|
||||
|
||||
snapshotStore.setKvmCheckpointPath(snapshotObjectTo.getCheckpointPath());
|
||||
_snapshotStoreDao.update(snapshotStore.getId(), snapshotStore);
|
||||
}
|
||||
|
||||
|
||||
// if a snapshot has parent snapshot, the new snapshot should be stored in
|
||||
// the same store as its parent since
|
||||
// we are taking delta snapshot
|
||||
private DataStore findSnapshotImageStore(SnapshotInfo snapshot) {
|
||||
@Override
|
||||
public DataStore findSnapshotImageStore(SnapshotInfo snapshot) {
|
||||
Boolean fullSnapshot = true;
|
||||
Boolean snapshotFullBackup = snapshot.getFullBackup();
|
||||
if (snapshotFullBackup != null) {
|
||||
|
|
@ -339,6 +387,49 @@ public class SnapshotServiceImpl implements SnapshotService {
|
|||
return imageStore;
|
||||
}
|
||||
|
||||
    /**
     * Converts a given snapshot that resides on secondary storage. The original snapshot and its backing chain are preserved;
     * the converted copy must be deleted later if it is not used.
     * The original purpose of this method is to work with KVM incremental snapshots, copying the snapshot to a temporary location and consolidating the snapshot chain.
     * @param snapshotInfo The snapshot to be converted
     * @return the given snapshotInfo with the updated path. This must not be persisted to the DB, otherwise the original snapshot will be lost.
     */
    @Override
    public SnapshotInfo convertSnapshot(SnapshotInfo snapshotInfo) {
        SnapshotObject snapObj = (SnapshotObject)snapshotInfo;

        logger.debug("Converting snapshot [{}].", snapObj);
        Answer answer = null;
        try {
            snapObj.processEvent(Snapshot.Event.BackupToSecondary);

            SnapshotObjectTO snapshotObjectTO = (SnapshotObjectTO) snapshotInfo.getTO();
            ConvertSnapshotCommand cmd = new ConvertSnapshotCommand(snapshotObjectTO);

            EndPoint ep = epSelector.select(snapshotInfo, StorageAction.CONVERTSNAPSHOT);

            answer = ep.sendMessage(cmd);

            if (answer != null && answer.getResult()) {
                snapObj.setPath(((ConvertSnapshotAnswer) answer).getSnapshotObjectTO().getPath());
                return snapObj;
            }
        } catch (NoTransitionException e) {
            logger.debug("Failed to change snapshot {} state.", snapObj.getUuid(), e);
        } finally {
            try {
                if (answer != null && answer.getResult()) {
                    snapObj.processEvent(Snapshot.Event.OperationSucceeded);
                } else {
                    snapObj.processEvent(Snapshot.Event.OperationNotPerformed);
                }
            } catch (NoTransitionException ex) {
                logger.debug("Failed to change snapshot {} state.", snapObj.getUuid(), ex);
            }
        }

        throw new CloudRuntimeException(String.format("Failed to convert snapshot [%s]%s.", snapObj.getUuid(), answer != null ? String.format(" due to [%s]", answer.getDetails()) : ""));
    }

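For context, a minimal usage sketch of the contract documented above. Only `convertSnapshot` itself comes from this change; the caller and the helper names (`consumeConvertedSnapshot`, `deleteTemporaryCopy`) are illustrative assumptions, not CloudStack APIs.

    // Hypothetical caller, sketching how the converted copy is meant to be handled.
    private void useConvertedSnapshot(SnapshotService snapshotService, SnapshotInfo snapshotInfo) {
        // Returns the same object carrying a temporary, consolidated path; never persist it to the DB.
        SnapshotInfo converted = snapshotService.convertSnapshot(snapshotInfo);
        try {
            consumeConvertedSnapshot(converted); // e.g. revert or copy from the consolidated temporary file
        } finally {
            deleteTemporaryCopy(converted);      // the temporary copy is not tracked, so the caller must clean it up
        }
    }
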
    @Override
    public SnapshotInfo backupSnapshot(SnapshotInfo snapshot) {
        SnapshotObject snapObj = (SnapshotObject)snapshot;

@ -404,7 +495,7 @@ public class SnapshotServiceImpl implements SnapshotService {
|
|||
SnapshotResult snapResult = new SnapshotResult(destSnapshot, result.getAnswer());
|
||||
if (result.isFailed()) {
|
||||
try {
|
||||
if (createSnapshotPayload.getAsyncBackup()) {
|
||||
if (BooleanUtils.isTrue(createSnapshotPayload.getAsyncBackup())) {
|
||||
_snapshotDao.remove(srcSnapshot.getId());
|
||||
destSnapshot.processEvent(Event.OperationFailed);
|
||||
throw new SnapshotBackupException(String.format("Failed in creating backup of snapshot %s", srcSnapshot));
|
||||
|
|
@@ -528,6 +619,17 @@ public class SnapshotServiceImpl implements SnapshotService {
    public boolean deleteSnapshot(SnapshotInfo snapInfo) {
        snapInfo.processEvent(ObjectInDataStoreStateMachine.Event.DestroyRequested);

        if (Hypervisor.HypervisorType.KVM.equals(snapInfo.getHypervisorType()) &&
                SnapshotManager.kvmIncrementalSnapshot.valueIn(hostDao.findClusterIdByVolumeInfo(snapInfo.getBaseVolume()))) {
            SnapshotDataStoreVO snapshotDataStoreVo = _snapshotStoreDao.findByStoreSnapshot(snapInfo.getDataStore().getRole(), snapInfo.getDataStore().getId(), snapInfo.getSnapshotId());
            String kvmCheckpointPath = snapshotDataStoreVo.getKvmCheckpointPath();
            if (kvmCheckpointPath != null) {
                snapInfo.setCheckpointPath(kvmCheckpointPath);
                snapInfo.setKvmIncrementalSnapshot(true);
                deleteBitmap(snapInfo);
            }
        }

        AsyncCallFuture<SnapshotResult> future = new AsyncCallFuture<SnapshotResult>();
        DeleteSnapshotContext<CommandResult> context = new DeleteSnapshotContext<CommandResult>(null, snapInfo, future);
        AsyncCallbackDispatcher<SnapshotServiceImpl, CommandResult> caller = AsyncCallbackDispatcher.create(this);

@@ -551,6 +653,25 @@ public class SnapshotServiceImpl implements SnapshotService {
        return false;
    }

    protected void deleteBitmap(SnapshotInfo snapshotInfo) {
        Volume baseVol = snapshotInfo.getBaseVolume();
        if (baseVol == null || !Volume.State.Ready.equals(baseVol.getState())) {
            return;
        }

        VirtualMachine attachedVM = snapshotInfo.getBaseVolume().getAttachedVM();

        RemoveBitmapCommand cmd = new RemoveBitmapCommand((SnapshotObjectTO) snapshotInfo.getTO(),
                attachedVM != null && attachedVM.getState().equals(VirtualMachine.State.Running));
        EndPoint ep = epSelector.select(snapshotInfo, StorageAction.REMOVEBITMAP);

        Answer answer = ep.sendMessage(cmd);
        if (!answer.getResult()) {
            logger.error("Unable to remove bitmap associated with snapshot {} due to {}.", snapshotInfo.getName(), answer.getDetails());
            throw new CloudRuntimeException(String.format("Unable to remove bitmap associated with snapshot [%s].", snapshotInfo.getName()));
        }
    }

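A hypothetical unit-test sketch, in the Mockito style used by the tests elsewhere in this change, exercising the early-return guard of `deleteBitmap`: with no base volume, no endpoint should ever be selected. The test class name, package, and wiring are assumptions for illustration; no such test exists in this diff.

    package org.apache.cloudstack.storage.snapshot;

    import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
    import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.mockito.InjectMocks;
    import org.mockito.Mock;
    import org.mockito.Mockito;
    import org.mockito.junit.MockitoJUnitRunner;

    @RunWith(MockitoJUnitRunner.class)
    public class SnapshotServiceImplDeleteBitmapTest {

        @InjectMocks
        SnapshotServiceImpl snapshotService;

        @Mock
        EndPointSelector epSelector;

        @Mock
        SnapshotInfo snapshotInfoMock;

        @Test
        public void deleteBitmapTestReturnsEarlyWhenBaseVolumeIsNull() {
            // With no base volume, deleteBitmap() must bail out before building a RemoveBitmapCommand.
            Mockito.doReturn(null).when(snapshotInfoMock).getBaseVolume();

            snapshotService.deleteBitmap(snapshotInfoMock);

            Mockito.verifyNoInteractions(epSelector);
        }
    }
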
@Override
|
||||
public boolean revertSnapshot(SnapshotInfo snapshot) {
|
||||
PrimaryDataStore store = null;
|
||||
|
|
|
|||
|
|
@ -34,6 +34,7 @@ import org.apache.cloudstack.storage.datastore.api.SnapshotGroup;
|
|||
import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient;
|
||||
import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClientConnectionPool;
|
||||
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||
import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil;
|
||||
|
|
@ -301,7 +302,11 @@ public class ScaleIOVMSnapshotStrategy extends ManagerBase implements VMSnapshot
|
|||
srcSnapshotDestVolumeMap.put(srcSnapshotVolumeId, destVolumeId);
|
||||
}
|
||||
|
||||
String systemId = storagePoolDetailsDao.findDetail(storagePoolId, ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID).getValue();
|
||||
String systemId = null;
|
||||
StoragePoolDetailVO systemIdDetail = storagePoolDetailsDao.findDetail(storagePoolId, ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID);
|
||||
if (systemIdDetail != null) {
|
||||
systemId = systemIdDetail.getValue();
|
||||
}
|
||||
if (systemId == null) {
|
||||
throw new CloudRuntimeException("Failed to get the system id for PowerFlex storage pool for reverting VM snapshot: " + vmSnapshot.getName());
|
||||
}
|
||||
|
|
@ -380,7 +385,11 @@ public class ScaleIOVMSnapshotStrategy extends ManagerBase implements VMSnapshot
|
|||
try {
|
||||
List<VolumeObjectTO> volumeTOs = vmSnapshotHelper.getVolumeTOList(vmSnapshot.getVmId());
|
||||
StoragePoolVO storagePool = vmSnapshotHelper.getStoragePoolForVM(userVm);
|
||||
String systemId = storagePoolDetailsDao.findDetail(storagePool.getId(), ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID).getValue();
|
||||
String systemId = null;
|
||||
StoragePoolDetailVO systemIdDetail = storagePoolDetailsDao.findDetail(storagePool.getId(), ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID);
|
||||
if (systemIdDetail != null) {
|
||||
systemId = systemIdDetail.getValue();
|
||||
}
|
||||
if (systemId == null) {
|
||||
throw new CloudRuntimeException("Failed to get the system id for PowerFlex storage pool for deleting VM snapshot: " + vmSnapshot.getName());
|
||||
}
|
||||
|
|
|
|||
|
|
@ -20,6 +20,7 @@ package org.apache.cloudstack.storage.snapshot;
|
|||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import com.cloud.hypervisor.Hypervisor;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
|
||||
|
|
@ -77,7 +78,7 @@ public class DefaultSnapshotStrategyTest {
|
|||
VolumeDetailsDao volumeDetailsDaoMock;
|
||||
|
||||
@Mock
|
||||
SnapshotService snapshotServiceMock;
|
||||
private SnapshotDataStoreVO snapshotDataStoreVOMock;
|
||||
|
||||
@Mock
|
||||
SnapshotZoneDao snapshotZoneDaoMock;
|
||||
|
|
@ -88,6 +89,9 @@ public class DefaultSnapshotStrategyTest {
|
|||
@Mock
|
||||
DataStoreManager dataStoreManager;
|
||||
|
||||
@Mock
|
||||
SnapshotService snapshotService;
|
||||
|
||||
List<SnapshotInfo> mockSnapshotInfos = new ArrayList<>();
|
||||
|
||||
@Before
|
||||
|
|
@ -106,6 +110,49 @@ public class DefaultSnapshotStrategyTest {
|
|||
Assert.assertTrue(result.contains(snapshotInfo2Mock));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void updateEndOfChainIfNeededTestNotKvm() {
|
||||
Mockito.doReturn(Hypervisor.HypervisorType.VMware).when(snapshotVoMock).getHypervisorType();
|
||||
|
||||
defaultSnapshotStrategySpy.updateEndOfChainIfNeeded(snapshotVoMock);
|
||||
|
||||
Mockito.verify(snapshotDataStoreDao, Mockito.never()).findBySnapshotIdAndDataStoreRoleAndState(Mockito.anyLong(), Mockito.any(), Mockito.any());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void updateEndOfChainIfNeededTestKvmAndIsNotEndOfChain() {
|
||||
Mockito.doReturn(Hypervisor.HypervisorType.KVM).when(snapshotVoMock).getHypervisorType();
|
||||
Mockito.doReturn(2L).when(snapshotVoMock).getSnapshotId();
|
||||
|
||||
SnapshotDataStoreVO snapshotDataStoreVO = new SnapshotDataStoreVO();
|
||||
snapshotDataStoreVO.setEndOfChain(false);
|
||||
Mockito.doReturn(snapshotDataStoreVO).when(snapshotDataStoreDao).findBySnapshotIdAndDataStoreRoleAndState(2, DataStoreRole.Image, ObjectInDataStoreStateMachine.State.Destroyed);
|
||||
|
||||
defaultSnapshotStrategySpy.updateEndOfChainIfNeeded(snapshotVoMock);
|
||||
|
||||
Mockito.verify(snapshotDataStoreDao, Mockito.never()).update(Mockito.anyLong(), Mockito.any());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void updateEndOfChainIfNeededTestKvmAndIsEndOfChain() {
|
||||
Mockito.doReturn(Hypervisor.HypervisorType.KVM).when(snapshotVoMock).getHypervisorType();
|
||||
Mockito.doReturn(2L).when(snapshotVoMock).getSnapshotId();
|
||||
|
||||
SnapshotDataStoreVO snapshotDataStoreVO = new SnapshotDataStoreVO();
|
||||
snapshotDataStoreVO.setEndOfChain(true);
|
||||
snapshotDataStoreVO.setParentSnapshotId(8);
|
||||
Mockito.doReturn(snapshotDataStoreVO).when(snapshotDataStoreDao).findBySnapshotIdAndDataStoreRoleAndState(2, DataStoreRole.Image, ObjectInDataStoreStateMachine.State.Destroyed);
|
||||
|
||||
Mockito.doReturn(ObjectInDataStoreStateMachine.State.Ready).when(snapshotDataStoreVOMock).getState();
|
||||
Mockito.doReturn(List.of(snapshotDataStoreVOMock)).when(snapshotDataStoreDao).listBySnapshotId(8);
|
||||
|
||||
defaultSnapshotStrategySpy.updateEndOfChainIfNeeded(snapshotVoMock);
|
||||
|
||||
Mockito.verify(snapshotDataStoreDao, Mockito.times(1)).listBySnapshotId(Mockito.anyLong());
|
||||
Mockito.verify(snapshotDataStoreVOMock).setEndOfChain(true);
|
||||
Mockito.verify(snapshotDataStoreDao, Mockito.times(1)).update(Mockito.anyLong(), Mockito.any());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void validateUpdateSnapshotToDestroyed() {
|
||||
Mockito.doReturn(true).when(snapshotDaoMock).update(Mockito.anyLong(), Mockito.any());
|
||||
|
|
@ -146,37 +193,12 @@ public class DefaultSnapshotStrategyTest {
|
|||
Mockito.doReturn(dataStoreMock).when(snapshotInfo1Mock).getDataStore();
|
||||
Mockito.doReturn(snapshotObjectMock).when(defaultSnapshotStrategySpy).castSnapshotInfoToSnapshotObject(snapshotInfo1Mock);
|
||||
Mockito.doNothing().when(snapshotObjectMock).processEvent(Mockito.any(Snapshot.Event.class));
|
||||
Mockito.doReturn(true).when(snapshotServiceMock).deleteSnapshot(Mockito.any());
|
||||
Mockito.when(dataStoreMock.getRole()).thenReturn(DataStoreRole.Primary);
|
||||
|
||||
boolean result = defaultSnapshotStrategySpy.deleteSnapshotInfo(snapshotInfo1Mock, snapshotVoMock);
|
||||
Assert.assertTrue(result);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void deleteSnapshotInfoTestReturnFalseIfCannotDeleteTheSnapshotOnPrimaryStorage() throws NoTransitionException {
|
||||
Mockito.doReturn(dataStoreMock).when(snapshotInfo1Mock).getDataStore();
|
||||
Mockito.doReturn(snapshotObjectMock).when(defaultSnapshotStrategySpy).castSnapshotInfoToSnapshotObject(snapshotInfo1Mock);
|
||||
Mockito.doNothing().when(snapshotObjectMock).processEvent(Mockito.any(Snapshot.Event.class));
|
||||
Mockito.doReturn(false).when(snapshotServiceMock).deleteSnapshot(Mockito.any());
|
||||
Mockito.when(dataStoreMock.getRole()).thenReturn(DataStoreRole.Primary);
|
||||
|
||||
boolean result = defaultSnapshotStrategySpy.deleteSnapshotInfo(snapshotInfo1Mock, snapshotVoMock);
|
||||
Assert.assertFalse(result);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void deleteSnapshotInfoTestReturnFalseIfDeleteSnapshotOnPrimaryStorageThrowsACloudRuntimeException() throws NoTransitionException {
|
||||
Mockito.doReturn(dataStoreMock).when(snapshotInfo1Mock).getDataStore();
|
||||
Mockito.doReturn(snapshotObjectMock).when(defaultSnapshotStrategySpy).castSnapshotInfoToSnapshotObject(snapshotInfo1Mock);
|
||||
Mockito.doNothing().when(snapshotObjectMock).processEvent(Mockito.any(Snapshot.Event.class));
|
||||
Mockito.doThrow(CloudRuntimeException.class).when(snapshotServiceMock).deleteSnapshot(Mockito.any());
|
||||
Mockito.when(dataStoreMock.getRole()).thenReturn(DataStoreRole.Primary);
|
||||
|
||||
boolean result = defaultSnapshotStrategySpy.deleteSnapshotInfo(snapshotInfo1Mock, snapshotVoMock);
|
||||
Assert.assertFalse(result);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void deleteSnapshotInfoTestReturnTrueIfCanDeleteTheSnapshotChainForSecondaryStorage() throws NoTransitionException {
|
||||
Mockito.doReturn(dataStoreMock).when(snapshotInfo1Mock).getDataStore();
|
||||
|
|
@ -233,31 +255,6 @@ public class DefaultSnapshotStrategyTest {
|
|||
defaultSnapshotStrategySpy.verifyIfTheSnapshotIsBeingUsedByAnyVolume(snapshotObjectMock);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void deleteSnapshotInPrimaryStorageTestReturnTrueIfDeleteReturnsTrue() throws NoTransitionException {
|
||||
Mockito.doReturn(true).when(snapshotServiceMock).deleteSnapshot(Mockito.any());
|
||||
Mockito.doNothing().when(snapshotObjectMock).processEvent(Mockito.any(Snapshot.Event.class));
|
||||
Assert.assertTrue(defaultSnapshotStrategySpy.deleteSnapshotInPrimaryStorage(null, null, null, snapshotObjectMock, true));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void deleteSnapshotInPrimaryStorageTestReturnTrueIfDeleteNotLastRefReturnsTrue() throws NoTransitionException {
|
||||
Mockito.doReturn(true).when(snapshotServiceMock).deleteSnapshot(Mockito.any());
|
||||
Assert.assertTrue(defaultSnapshotStrategySpy.deleteSnapshotInPrimaryStorage(null, null, null, snapshotObjectMock, false));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void deleteSnapshotInPrimaryStorageTestReturnFalseIfDeleteReturnsFalse() throws NoTransitionException {
|
||||
Mockito.doReturn(false).when(snapshotServiceMock).deleteSnapshot(Mockito.any());
|
||||
Assert.assertFalse(defaultSnapshotStrategySpy.deleteSnapshotInPrimaryStorage(null, null, null, null, true));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void deleteSnapshotInPrimaryStorageTestReturnFalseIfDeleteThrowsException() throws NoTransitionException {
|
||||
Mockito.doThrow(CloudRuntimeException.class).when(snapshotServiceMock).deleteSnapshot(Mockito.any());
|
||||
Assert.assertFalse(defaultSnapshotStrategySpy.deleteSnapshotInPrimaryStorage(null, null, null, null, true));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetSnapshotImageStoreRefNull() {
|
||||
SnapshotDataStoreVO ref1 = Mockito.mock(SnapshotDataStoreVO.class);
|
||||
|
|
|
|||
|
|
@ -18,7 +18,9 @@
|
|||
*/
|
||||
package org.apache.cloudstack.storage.snapshot;
|
||||
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.storage.DataStoreRole;
|
||||
import com.cloud.storage.ImageStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore;
|
||||
|
|
@ -30,7 +32,11 @@ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
|
|||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
|
||||
import org.apache.cloudstack.framework.async.AsyncCallFuture;
|
||||
import org.apache.cloudstack.secstorage.heuristics.HeuristicType;
|
||||
import org.apache.cloudstack.storage.command.CreateObjectAnswer;
|
||||
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
|
||||
import org.apache.cloudstack.storage.heuristics.HeuristicRuleHelper;
|
||||
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
|
|
@ -61,7 +67,7 @@ public class SnapshotServiceImplTest {
|
|||
HeuristicRuleHelper heuristicRuleHelperMock;
|
||||
|
||||
@Mock
|
||||
SnapshotInfo snapshotMock;
|
||||
SnapshotInfo snapshotInfoMock;
|
||||
|
||||
@Mock
|
||||
VolumeInfo volumeInfoMock;
|
||||
|
|
@ -69,12 +75,34 @@ public class SnapshotServiceImplTest {
|
|||
@Mock
|
||||
DataStoreManager dataStoreManagerMock;
|
||||
|
||||
@Mock
|
||||
private SnapshotResult snapshotResultMock;
|
||||
|
||||
@Mock
|
||||
private CreateObjectAnswer createObjectAnswerMock;
|
||||
|
||||
@Mock
|
||||
private ImageStore imageStoreMock;
|
||||
|
||||
@Mock
|
||||
private SnapshotDataStoreVO snapshotDataStoreVoMock;
|
||||
|
||||
@Mock
|
||||
private SnapshotDataStoreDao snapshotDataStoreDaoMock;
|
||||
|
||||
@Mock
|
||||
private DataStore dataStoreMock;
|
||||
|
||||
@Mock
|
||||
private SnapshotObjectTO snapshotObjectTOMock;
|
||||
|
||||
|
||||
private static final long DUMMY_ID = 1L;
|
||||
|
||||
@Test
|
||||
public void testRevertSnapshotWithNoPrimaryStorageEntry() throws Exception {
|
||||
Mockito.when(snapshotMock.getId()).thenReturn(DUMMY_ID);
|
||||
Mockito.when(snapshotMock.getVolumeId()).thenReturn(DUMMY_ID);
|
||||
Mockito.when(snapshotInfoMock.getId()).thenReturn(DUMMY_ID);
|
||||
Mockito.when(snapshotInfoMock.getVolumeId()).thenReturn(DUMMY_ID);
|
||||
Mockito.when(_snapshotFactory.getSnapshotOnPrimaryStore(1L)).thenReturn(null);
|
||||
Mockito.when(volFactory.getVolume(DUMMY_ID, DataStoreRole.Primary)).thenReturn(volumeInfoMock);
|
||||
|
||||
|
|
@ -89,7 +117,7 @@ public class SnapshotServiceImplTest {
|
|||
Mockito.when(mock.get()).thenReturn(result);
|
||||
Mockito.when(result.isFailed()).thenReturn(false);
|
||||
})) {
|
||||
Assert.assertTrue(snapshotService.revertSnapshot(snapshotMock));
|
||||
Assert.assertTrue(snapshotService.revertSnapshot(snapshotInfoMock));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -97,9 +125,9 @@ public class SnapshotServiceImplTest {
|
|||
public void getImageStoreForSnapshotTestShouldListFreeImageStoresWithNoHeuristicRule() {
|
||||
Mockito.when(heuristicRuleHelperMock.getImageStoreIfThereIsHeuristicRule(Mockito.anyLong(), Mockito.any(HeuristicType.class), Mockito.any(SnapshotInfo.class))).
|
||||
thenReturn(null);
|
||||
Mockito.when(snapshotMock.getDataCenterId()).thenReturn(DUMMY_ID);
|
||||
Mockito.when(snapshotInfoMock.getDataCenterId()).thenReturn(DUMMY_ID);
|
||||
|
||||
snapshotService.getImageStoreForSnapshot(DUMMY_ID, snapshotMock);
|
||||
snapshotService.getImageStoreForSnapshot(DUMMY_ID, snapshotInfoMock);
|
||||
|
||||
Mockito.verify(dataStoreManagerMock, Mockito.times(1)).getImageStoreWithFreeCapacity(Mockito.anyLong());
|
||||
}
|
||||
|
|
@ -110,8 +138,123 @@ public class SnapshotServiceImplTest {
|
|||
Mockito.when(heuristicRuleHelperMock.getImageStoreIfThereIsHeuristicRule(Mockito.anyLong(), Mockito.any(HeuristicType.class), Mockito.any(SnapshotInfo.class))).
|
||||
thenReturn(dataStore);
|
||||
|
||||
snapshotService.getImageStoreForSnapshot(DUMMY_ID, snapshotMock);
|
||||
snapshotService.getImageStoreForSnapshot(DUMMY_ID, snapshotInfoMock);
|
||||
|
||||
Mockito.verify(dataStoreManagerMock, Mockito.times(0)).getImageStoreWithFreeCapacity(Mockito.anyLong());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void updateSnapSizeAndCheckpointPathIfPossibleTestResultIsFalse() {
|
||||
Mockito.doReturn(createObjectAnswerMock).when(snapshotResultMock).getAnswer();
|
||||
Mockito.doReturn(false).when(createObjectAnswerMock).getResult();
|
||||
|
||||
snapshotService.updateSnapSizeAndCheckpointPathIfPossible(snapshotResultMock, snapshotInfoMock);
|
||||
|
||||
Mockito.verify(snapshotInfoMock, Mockito.never()).getImageStore();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void updateSnapSizeAndCheckpointPathIfPossibleTestResultIsTrueAnswerIsNotCreateObjectAnswer() {
|
||||
Answer answer = new Answer(null, true, null);
|
||||
|
||||
Mockito.doReturn(answer).when(snapshotResultMock).getAnswer();
|
||||
|
||||
snapshotService.updateSnapSizeAndCheckpointPathIfPossible(snapshotResultMock, snapshotInfoMock);
|
||||
|
||||
Mockito.verify(snapshotInfoMock, Mockito.never()).getImageStore();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void updateSnapSizeAndCheckpointPathIfPossibleTestResultIsTrueAnswerIsCreateObjectAnswerAndImageStoreIsNotNullAndPhysicalSizeIsZero() {
|
||||
Mockito.doReturn(createObjectAnswerMock).when(snapshotResultMock).getAnswer();
|
||||
Mockito.doReturn(true).when(createObjectAnswerMock).getResult();
|
||||
|
||||
Mockito.doReturn(snapshotInfoMock).when(snapshotResultMock).getSnapshot();
|
||||
|
||||
Mockito.doReturn(dataStoreMock).when(snapshotInfoMock).getImageStore();
|
||||
|
||||
Mockito.doReturn(snapshotDataStoreVoMock).when(snapshotDataStoreDaoMock).findBySnapshotIdAndDataStoreRoleAndState(Mockito.anyLong(), Mockito.any(), Mockito.any());
|
||||
|
||||
Mockito.doReturn(snapshotObjectTOMock).when(createObjectAnswerMock).getData();
|
||||
Mockito.doReturn("checkpath").when(snapshotObjectTOMock).getCheckpointPath();
|
||||
Mockito.doReturn(0L).when(snapshotObjectTOMock).getPhysicalSize();
|
||||
|
||||
snapshotService.updateSnapSizeAndCheckpointPathIfPossible(snapshotResultMock, snapshotInfoMock);
|
||||
|
||||
Mockito.verify(snapshotDataStoreVoMock).setKvmCheckpointPath("checkpath");
|
||||
Mockito.verify(snapshotDataStoreVoMock, Mockito.never()).setPhysicalSize(Mockito.anyLong());
|
||||
|
||||
Mockito.verify(snapshotDataStoreDaoMock).update(Mockito.anyLong(), Mockito.any());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void updateSnapSizeAndCheckpointPathIfPossibleTestResultIsTrueAnswerIsCreateObjectAnswerAndImageStoreIsNotNullAndPhysicalSizeGreaterThanZero() {
|
||||
Mockito.doReturn(createObjectAnswerMock).when(snapshotResultMock).getAnswer();
|
||||
Mockito.doReturn(true).when(createObjectAnswerMock).getResult();
|
||||
|
||||
Mockito.doReturn(snapshotInfoMock).when(snapshotResultMock).getSnapshot();
|
||||
|
||||
Mockito.doReturn(dataStoreMock).when(snapshotInfoMock).getImageStore();
|
||||
|
||||
Mockito.doReturn(snapshotDataStoreVoMock).when(snapshotDataStoreDaoMock).findBySnapshotIdAndDataStoreRoleAndState(Mockito.anyLong(), Mockito.any(), Mockito.any());
|
||||
|
||||
Mockito.doReturn(snapshotObjectTOMock).when(createObjectAnswerMock).getData();
|
||||
Mockito.doReturn("checkpath").when(snapshotObjectTOMock).getCheckpointPath();
|
||||
Mockito.doReturn(1000L).when(snapshotObjectTOMock).getPhysicalSize();
|
||||
|
||||
snapshotService.updateSnapSizeAndCheckpointPathIfPossible(snapshotResultMock, snapshotInfoMock);
|
||||
|
||||
Mockito.verify(snapshotDataStoreVoMock).setKvmCheckpointPath("checkpath");
|
||||
Mockito.verify(snapshotDataStoreVoMock).setPhysicalSize(1000L);
|
||||
|
||||
Mockito.verify(snapshotDataStoreDaoMock).update(Mockito.anyLong(), Mockito.any());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void updateSnapSizeAndCheckpointPathIfPossibleTestResultIsTrueAnswerIsCreateObjectAnswerAndImageStoreIsNullAndPhysicalSizeIsZero() {
|
||||
Mockito.doReturn(createObjectAnswerMock).when(snapshotResultMock).getAnswer();
|
||||
Mockito.doReturn(true).when(createObjectAnswerMock).getResult();
|
||||
|
||||
Mockito.doReturn(snapshotInfoMock).when(snapshotResultMock).getSnapshot();
|
||||
|
||||
Mockito.doReturn(null).when(snapshotInfoMock).getImageStore();
|
||||
Mockito.doReturn(dataStoreMock).when(snapshotInfoMock).getDataStore();
|
||||
|
||||
Mockito.doReturn(snapshotDataStoreVoMock).when(snapshotDataStoreDaoMock).findByStoreSnapshot(Mockito.any(), Mockito.anyLong(), Mockito.anyLong());
|
||||
|
||||
Mockito.doReturn(snapshotObjectTOMock).when(createObjectAnswerMock).getData();
|
||||
Mockito.doReturn("checkpath").when(snapshotObjectTOMock).getCheckpointPath();
|
||||
Mockito.doReturn(0L).when(snapshotObjectTOMock).getPhysicalSize();
|
||||
|
||||
snapshotService.updateSnapSizeAndCheckpointPathIfPossible(snapshotResultMock, snapshotInfoMock);
|
||||
|
||||
Mockito.verify(snapshotDataStoreVoMock).setKvmCheckpointPath("checkpath");
|
||||
Mockito.verify(snapshotDataStoreVoMock, Mockito.never()).setPhysicalSize(Mockito.anyLong());
|
||||
|
||||
Mockito.verify(snapshotDataStoreDaoMock).update(Mockito.anyLong(), Mockito.any());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void updateSnapSizeAndCheckpointPathIfPossibleTestResultIsTrueAnswerIsCreateObjectAnswerAndImageStoreIsNullAndPhysicalSizeGreaterThanZero() {
|
||||
Mockito.doReturn(createObjectAnswerMock).when(snapshotResultMock).getAnswer();
|
||||
Mockito.doReturn(true).when(createObjectAnswerMock).getResult();
|
||||
|
||||
Mockito.doReturn(snapshotInfoMock).when(snapshotResultMock).getSnapshot();
|
||||
|
||||
Mockito.doReturn(null).when(snapshotInfoMock).getImageStore();
|
||||
Mockito.doReturn(dataStoreMock).when(snapshotInfoMock).getDataStore();
|
||||
|
||||
Mockito.doReturn(snapshotDataStoreVoMock).when(snapshotDataStoreDaoMock).findByStoreSnapshot(Mockito.any(), Mockito.anyLong(), Mockito.anyLong());
|
||||
|
||||
Mockito.doReturn(snapshotObjectTOMock).when(createObjectAnswerMock).getData();
|
||||
Mockito.doReturn("checkpath").when(snapshotObjectTOMock).getCheckpointPath();
|
||||
Mockito.doReturn(1000L).when(snapshotObjectTOMock).getPhysicalSize();
|
||||
|
||||
snapshotService.updateSnapSizeAndCheckpointPathIfPossible(snapshotResultMock, snapshotInfoMock);
|
||||
|
||||
Mockito.verify(snapshotDataStoreVoMock).setKvmCheckpointPath("checkpath");
|
||||
Mockito.verify(snapshotDataStoreVoMock).setPhysicalSize(1000L);
|
||||
|
||||
Mockito.verify(snapshotDataStoreDaoMock).update(Mockito.anyLong(), Mockito.any());
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -16,64 +16,68 @@
|
|||
// under the License.
|
||||
package org.apache.cloudstack.storage.allocator;
|
||||
|
||||
import java.math.BigDecimal;
|
||||
import java.security.SecureRandom;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.inject.Inject;
|
||||
import javax.naming.ConfigurationException;
|
||||
|
||||
import com.cloud.api.query.dao.StoragePoolJoinDao;
|
||||
import com.cloud.exception.StorageUnavailableException;
|
||||
import com.cloud.storage.ScopeType;
|
||||
import com.cloud.storage.StoragePoolStatus;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
|
||||
import org.apache.commons.collections.CollectionUtils;
|
||||
|
||||
import com.cloud.utils.Pair;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
|
||||
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
||||
|
||||
import com.cloud.capacity.Capacity;
|
||||
import com.cloud.capacity.dao.CapacityDao;
|
||||
import com.cloud.dc.ClusterVO;
|
||||
import com.cloud.dc.dao.ClusterDao;
|
||||
import com.cloud.deploy.DeploymentPlan;
|
||||
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
|
||||
import com.cloud.exception.StorageUnavailableException;
|
||||
import com.cloud.hypervisor.Hypervisor.HypervisorType;
|
||||
import com.cloud.storage.ScopeType;
|
||||
import com.cloud.storage.Storage;
|
||||
import com.cloud.storage.StorageManager;
|
||||
import com.cloud.storage.StoragePool;
|
||||
import com.cloud.storage.StoragePoolStatus;
|
||||
import com.cloud.storage.StorageUtil;
|
||||
import com.cloud.storage.Volume;
|
||||
import com.cloud.storage.dao.VolumeDao;
|
||||
import com.cloud.user.Account;
|
||||
import com.cloud.utils.NumbersUtil;
|
||||
import com.cloud.utils.Pair;
|
||||
import com.cloud.utils.StringUtils;
|
||||
import com.cloud.utils.component.AdapterBase;
|
||||
import com.cloud.vm.DiskProfile;
|
||||
import com.cloud.vm.VirtualMachineProfile;
|
||||
|
||||
import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
|
||||
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
|
||||
|
||||
import org.apache.commons.collections.CollectionUtils;
|
||||
|
||||
import javax.inject.Inject;
|
||||
import javax.naming.ConfigurationException;
|
||||
import java.math.BigDecimal;
|
||||
import java.security.SecureRandom;
|
||||
import java.text.DecimalFormat;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.Comparator;
|
||||
import java.util.HashMap;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
public abstract class AbstractStoragePoolAllocator extends AdapterBase implements StoragePoolAllocator {
|
||||
|
||||
protected BigDecimal storageOverprovisioningFactor = new BigDecimal(1);
|
||||
protected String allocationAlgorithm = "random";
|
||||
protected long extraBytesPerVolume = 0;
|
||||
static DecimalFormat decimalFormat = new DecimalFormat("#.##");
|
||||
@Inject protected DataStoreManager dataStoreMgr;
|
||||
@Inject protected PrimaryDataStoreDao storagePoolDao;
|
||||
@Inject protected VolumeDao volumeDao;
|
||||
@Inject protected ConfigurationDao configDao;
|
||||
@Inject private CapacityDao capacityDao;
|
||||
@Inject protected CapacityDao capacityDao;
|
||||
@Inject private ClusterDao clusterDao;
|
||||
@Inject private StorageManager storageMgr;
|
||||
@Inject private StorageUtil storageUtil;
|
||||
|
|
@ -95,10 +99,6 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
|
|||
String globalStorageOverprovisioningFactor = configs.get("storage.overprovisioning.factor");
|
||||
storageOverprovisioningFactor = new BigDecimal(NumbersUtil.parseFloat(globalStorageOverprovisioningFactor, 2.0f));
|
||||
extraBytesPerVolume = 0;
|
||||
String allocationAlgorithm = configs.get("vm.allocation.algorithm");
|
||||
if (allocationAlgorithm != null) {
|
||||
this.allocationAlgorithm = allocationAlgorithm;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
|
|
@@ -142,12 +142,16 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
                    capacityType, storagePool.getName(), storagePool.getUuid(), storageType
            ));

        List<Long> poolIdsByCapacity = capacityDao.orderHostsByFreeCapacity(zoneId, clusterId, capacityType);
        Pair<List<Long>, Map<Long, Double>> result = capacityDao.orderHostsByFreeCapacity(zoneId, clusterId, capacityType);
        List<Long> poolIdsByCapacity = result.first();
        Map<Long, String> sortedHostByCapacity = result.second().entrySet()
                .stream()
                .sorted(Map.Entry.comparingByValue(Comparator.reverseOrder()))
                .collect(Collectors.toMap(Map.Entry::getKey, entry -> decimalFormat.format(entry.getValue() * 100) + "%", (e1, e2) -> e1, LinkedHashMap::new));
        logger.debug("List of pools [{}] in descending order of available capacity (percentage): {}",
                poolIdsByCapacity, sortedHostByCapacity);

        logger.debug(String.format("List of pools in descending order of available capacity [%s].", poolIdsByCapacity));

        //now filter the given list of Pools by this ordered list
        // now filter the given list of Pools by this ordered list
        Map<Long, StoragePool> poolMap = new HashMap<>();
        for (StoragePool pool : pools) {
            poolMap.put(pool.getId(), pool);

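The percentage formatting introduced above is plain stream plumbing over the capacity map returned as the second element of the `orderHostsByFreeCapacity` Pair. A self-contained sketch with made-up values (nothing below is CloudStack API) shows the payload that ends up in the debug log:

    import java.text.DecimalFormat;
    import java.util.Comparator;
    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.stream.Collectors;

    public class CapacityLogFormatSketch {
        static final DecimalFormat decimalFormat = new DecimalFormat("#.##");

        public static void main(String[] args) {
            // poolId -> free-capacity ratio, standing in for the map returned by the DAO.
            Map<Long, Double> freeCapacity = Map.of(1L, 0.08, 2L, 0.085, 3L, 0.6);

            Map<Long, String> sortedByCapacity = freeCapacity.entrySet().stream()
                    .sorted(Map.Entry.comparingByValue(Comparator.reverseOrder()))
                    .collect(Collectors.toMap(Map.Entry::getKey,
                            entry -> decimalFormat.format(entry.getValue() * 100) + "%",
                            (e1, e2) -> e1, LinkedHashMap::new));

            // Prints something like {3=60%, 2=8.5%, 1=8%}: highest free capacity first, matching the log message above.
            System.out.println(sortedByCapacity);
        }
    }
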
@ -227,16 +231,16 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
|
|||
}
|
||||
|
||||
List<StoragePool> reorderStoragePoolsBasedOnAlgorithm(List<StoragePool> pools, DeploymentPlan plan, Account account) {
|
||||
logger.debug(String.format("Using allocation algorithm [%s] to reorder pools.", allocationAlgorithm));
|
||||
|
||||
if (allocationAlgorithm.equals("random") || allocationAlgorithm.equals("userconcentratedpod_random") || (account == null)) {
|
||||
String volumeAllocationAlgorithm = VolumeOrchestrationService.VolumeAllocationAlgorithm.value();
|
||||
logger.debug("Using volume allocation algorithm {} to reorder pools.", volumeAllocationAlgorithm);
|
||||
if (volumeAllocationAlgorithm.equals("random") || volumeAllocationAlgorithm.equals("userconcentratedpod_random") || (account == null)) {
|
||||
reorderRandomPools(pools);
|
||||
} else if (StringUtils.equalsAny(allocationAlgorithm, "userdispersing", "firstfitleastconsumed")) {
|
||||
} else if (StringUtils.equalsAny(volumeAllocationAlgorithm, "userdispersing", "firstfitleastconsumed")) {
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace(String.format("Using reordering algorithm [%s]", allocationAlgorithm));
|
||||
logger.trace("Using reordering algorithm {}", volumeAllocationAlgorithm);
|
||||
}
|
||||
|
||||
if (allocationAlgorithm.equals("userdispersing")) {
|
||||
if (volumeAllocationAlgorithm.equals("userdispersing")) {
|
||||
pools = reorderPoolsByNumberOfVolumes(plan, pools, account);
|
||||
} else {
|
||||
pools = reorderPoolsByCapacity(plan, pools);
|
||||
|
|
@ -248,7 +252,7 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
|
|||
void reorderRandomPools(List<StoragePool> pools) {
|
||||
StorageUtil.traceLogStoragePools(pools, logger, "pools to choose from: ");
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace(String.format("Shuffle this so that we don't check the pools in the same order. Algorithm == '%s' (or no account?)", allocationAlgorithm));
|
||||
logger.trace("Shuffle this so that we don't check the pools in the same order. Algorithm == 'random' (or no account?)");
|
||||
}
|
||||
StorageUtil.traceLogStoragePools(pools, logger, "pools to shuffle: ");
|
||||
Collections.shuffle(pools, secureRandom);
|
||||
|
|
|
|||
|
|
@ -16,27 +16,27 @@
|
|||
// under the License.
|
||||
package org.apache.cloudstack.storage.allocator;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.inject.Inject;
|
||||
import javax.naming.ConfigurationException;
|
||||
|
||||
import com.cloud.storage.VolumeApiServiceImpl;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import com.cloud.deploy.DeploymentPlan;
|
||||
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
|
||||
import com.cloud.offering.ServiceOffering;
|
||||
import com.cloud.storage.ScopeType;
|
||||
import com.cloud.storage.StoragePool;
|
||||
import com.cloud.storage.VolumeApiServiceImpl;
|
||||
import com.cloud.storage.dao.DiskOfferingDao;
|
||||
import com.cloud.vm.DiskProfile;
|
||||
import com.cloud.vm.VirtualMachineProfile;
|
||||
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import javax.inject.Inject;
|
||||
import javax.naming.ConfigurationException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
@Component
|
||||
public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocator {
|
||||
|
||||
|
|
@ -116,14 +116,6 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat
|
|||
@Override
|
||||
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
|
||||
super.configure(name, params);
|
||||
|
||||
if (configDao != null) {
|
||||
Map<String, String> configs = configDao.getConfiguration(params);
|
||||
String allocationAlgorithm = configs.get("vm.allocation.algorithm");
|
||||
if (allocationAlgorithm != null) {
|
||||
this.allocationAlgorithm = allocationAlgorithm;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -18,12 +18,16 @@ package org.apache.cloudstack.storage.allocator;
|
|||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Comparator;
|
||||
import java.util.HashMap;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import javax.inject.Inject;
|
||||
|
||||
import com.cloud.utils.Pair;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
|
||||
|
|
@ -45,7 +49,7 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
|
|||
@Inject
|
||||
private DataStoreManager dataStoreMgr;
|
||||
@Inject
|
||||
private CapacityDao capacityDao;
|
||||
protected CapacityDao capacityDao;
|
||||
|
||||
@Override
|
||||
protected List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo, boolean bypassStorageTypeCheck, String keyword) {
|
||||
|
|
@ -122,9 +126,16 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
|
|||
return null;
|
||||
}
|
||||
|
||||
List<Long> poolIdsByCapacity = capacityDao.orderHostsByFreeCapacity(zoneId, null, capacityType);
|
||||
Pair<List<Long>, Map<Long, Double>> result = capacityDao.orderHostsByFreeCapacity(zoneId, null, capacityType);
|
||||
List<Long> poolIdsByCapacity = result.first();
|
||||
Map<Long, String> sortedHostByCapacity = result.second().entrySet()
|
||||
.stream()
|
||||
.sorted(Map.Entry.comparingByValue(Comparator.reverseOrder()))
|
||||
.collect(Collectors.toMap(Map.Entry::getKey, entry -> decimalFormat.format(entry.getValue() * 100) + "%",
|
||||
(e1, e2) -> e1, LinkedHashMap::new));
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("List of zone-wide storage pools in descending order of free capacity: "+ poolIdsByCapacity);
|
||||
logger.debug("List of zone-wide storage pools: [{}] in descending order of free capacity (percentage): {}",
|
||||
poolIdsByCapacity, sortedHostByCapacity);
|
||||
}
|
||||
|
||||
//now filter the given list of Pools by this ordered list
|
||||
|
|
|
|||
|
|
@ -18,6 +18,10 @@ package org.apache.cloudstack.storage.datastore;
|
|||
|
||||
import javax.inject.Inject;
|
||||
|
||||
import com.cloud.host.dao.HostDao;
|
||||
import com.cloud.hypervisor.Hypervisor;
|
||||
import com.cloud.storage.ImageStore;
|
||||
import com.cloud.storage.snapshot.SnapshotManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
|
@ -84,6 +88,10 @@ public class ObjectInDataStoreManagerImpl implements ObjectInDataStoreManager {
|
|||
SnapshotDao snapshotDao;
|
||||
@Inject
|
||||
VolumeDao volumeDao;
|
||||
|
||||
@Inject
|
||||
HostDao hostDao;
|
||||
|
||||
protected StateMachine2<State, Event, DataObjectInStore> stateMachines;
|
||||
|
||||
public ObjectInDataStoreManagerImpl() {
|
||||
|
|
@ -113,6 +121,7 @@ public class ObjectInDataStoreManagerImpl implements ObjectInDataStoreManager {
|
|||
stateMachines.addTransition(State.Migrating, Event.MigrationSucceeded, State.Destroyed);
|
||||
stateMachines.addTransition(State.Migrating, Event.OperationSuccessed, State.Ready);
|
||||
stateMachines.addTransition(State.Migrating, Event.OperationFailed, State.Ready);
|
||||
stateMachines.addTransition(State.Hidden, Event.DestroyRequested, State.Destroying);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
@ -130,14 +139,17 @@ public class ObjectInDataStoreManagerImpl implements ObjectInDataStoreManager {
|
|||
ss.setVolumeId(snapshotInfo.getVolumeId());
|
||||
ss.setSize(snapshotInfo.getSize()); // this is the virtual size of snapshot in primary storage.
|
||||
ss.setPhysicalSize(snapshotInfo.getSize()); // this physical size will get updated with actual size once the snapshot backup is done.
|
||||
SnapshotDataStoreVO snapshotDataStoreVO = snapshotDataStoreDao.findParent(dataStore.getRole(), dataStore.getId(), snapshotInfo.getVolumeId());
|
||||
if (snapshotDataStoreVO != null) {
|
||||
Long clusterId = hostDao.findClusterIdByVolumeInfo(snapshotInfo.getBaseVolume());
|
||||
SnapshotDataStoreVO parentSnapshotDataStoreVO = findParent(dataStore, clusterId, snapshotInfo);
|
||||
if (parentSnapshotDataStoreVO != null) {
|
||||
//Double check the snapshot is removed or not
|
||||
SnapshotVO parentSnap = snapshotDao.findById(snapshotDataStoreVO.getSnapshotId());
|
||||
if (parentSnap != null) {
|
||||
ss.setParentSnapshotId(snapshotDataStoreVO.getSnapshotId());
|
||||
SnapshotVO parentSnap = snapshotDao.findById(parentSnapshotDataStoreVO.getSnapshotId());
|
||||
if (parentSnap != null && !parentSnapshotDataStoreVO.isEndOfChain()) {
|
||||
ss.setParentSnapshotId(parentSnapshotDataStoreVO.getSnapshotId());
|
||||
} else if (parentSnapshotDataStoreVO.isEndOfChain()) {
|
||||
logger.debug("Snapshot [{}] will begin a new chain, as the last one has finished.", ss.getSnapshotId());
|
||||
} else {
|
||||
logger.debug("find inconsistent db for snapshot " + snapshotDataStoreVO.getSnapshotId());
|
||||
logger.debug("find inconsistent db for snapshot " + parentSnapshotDataStoreVO.getSnapshotId());
|
||||
}
|
||||
}
|
||||
ss.setState(ObjectInDataStoreStateMachine.State.Allocated);
|
||||
|
|
@ -179,9 +191,12 @@ public class ObjectInDataStoreManagerImpl implements ObjectInDataStoreManager {
|
|||
ss.setRole(dataStore.getRole());
|
||||
ss.setSize(snapshot.getSize());
|
||||
ss.setVolumeId(snapshot.getVolumeId());
|
||||
SnapshotDataStoreVO snapshotDataStoreVO = snapshotDataStoreDao.findParent(dataStore.getRole(), dataStore.getId(), snapshot.getVolumeId());
|
||||
if (snapshotDataStoreVO != null) {
|
||||
Long clusterId = hostDao.findClusterIdByVolumeInfo(snapshot.getBaseVolume());
|
||||
SnapshotDataStoreVO snapshotDataStoreVO = snapshotDataStoreDao.findParent(dataStore.getRole(), null, ((ImageStore)dataStore).getDataCenterId(), snapshot.getVolumeId(), SnapshotManager.kvmIncrementalSnapshot.valueIn(clusterId), snapshot.getHypervisorType());
|
||||
if (snapshotDataStoreVO != null && !snapshotDataStoreVO.isEndOfChain()) {
|
||||
ss.setParentSnapshotId(snapshotDataStoreVO.getSnapshotId());
|
||||
} else {
|
||||
logger.debug("Snapshot [{}] will begin a new chain, as the last one has finished.", ss.getSnapshotId());
|
||||
}
|
||||
ss.setInstallPath(TemplateConstants.DEFAULT_SNAPSHOT_ROOT_DIR + "/" + snapshotDao.findById(obj.getId()).getAccountId() + "/" + snapshot.getVolumeId());
|
||||
ss.setState(ObjectInDataStoreStateMachine.State.Allocated);
|
||||
|
|
@@ -201,6 +216,33 @@ public class ObjectInDataStoreManagerImpl implements ObjectInDataStoreManager {
        return this.get(obj, dataStore, null);
    }

    private SnapshotDataStoreVO findParent(DataStore dataStore, Long clusterId, SnapshotInfo snapshotInfo) {
        boolean kvmIncrementalSnapshot = SnapshotManager.kvmIncrementalSnapshot.valueIn(clusterId);
        SnapshotDataStoreVO snapshotDataStoreVO;
        if (Hypervisor.HypervisorType.KVM.equals(snapshotInfo.getHypervisorType()) && kvmIncrementalSnapshot) {
            snapshotDataStoreVO = snapshotDataStoreDao.findParent(null, null, null, snapshotInfo.getVolumeId(),
                    kvmIncrementalSnapshot, snapshotInfo.getHypervisorType());
            snapshotDataStoreVO = returnNullIfNotOnSameTypeOfStoreRole(snapshotInfo, snapshotDataStoreVO);
        } else {
            snapshotDataStoreVO = snapshotDataStoreDao.findParent(dataStore.getRole(), dataStore.getId(), null, snapshotInfo.getVolumeId(),
                    kvmIncrementalSnapshot, snapshotInfo.getHypervisorType());
        }
        return snapshotDataStoreVO;
    }

    private SnapshotDataStoreVO returnNullIfNotOnSameTypeOfStoreRole(SnapshotInfo snapshotInfo, SnapshotDataStoreVO snapshotDataStoreVO) {
        if (snapshotDataStoreVO == null) {
            return snapshotDataStoreVO;
        }
        if ((snapshotInfo.getImageStore() != null && !snapshotDataStoreVO.getRole().isImageStore()) ||
                (snapshotInfo.getImageStore() == null && snapshotDataStoreVO.getRole().isImageStore())) {
            snapshotDataStoreVO.setEndOfChain(true);
            snapshotDataStoreDao.update(snapshotDataStoreVO.getId(), snapshotDataStoreVO);
            return null;
        }
        return snapshotDataStoreVO;
    }

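To summarise the rule that `findParent` and `returnNullIfNotOnSameTypeOfStoreRole` implement above, here is a self-contained restatement as a plain decision function. The parameters are stand-ins for fields of `SnapshotDataStoreVO` (not a CloudStack class), and a `null` result means the new snapshot starts a fresh chain:

    // Illustrative only; mirrors the parent-selection outcome of the methods above.
    static Long resolveParentSnapshotId(Long parentSnapshotId, boolean parentEndOfChain,
            boolean parentOnImageStore, boolean newSnapshotGoesToImageStore) {
        if (parentSnapshotId == null) {
            return null;              // no previous snapshot for this volume: start a new chain
        }
        if (parentOnImageStore != newSnapshotGoesToImageStore) {
            return null;              // parent sits on a different kind of store: that chain is closed
        }
        if (parentEndOfChain) {
            return null;              // the previous chain was explicitly finished
        }
        return parentSnapshotId;      // otherwise continue the existing incremental chain
    }
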
@Override
|
||||
public boolean delete(DataObject dataObj) {
|
||||
long objId = dataObj.getId();
|
||||
|
|
|
|||
|
|
@ -461,45 +461,107 @@ public class DefaultEndPointSelector implements EndPointSelector {
|
|||
|
||||
@Override
|
||||
public EndPoint select(DataObject object, StorageAction action, boolean encryptionRequired) {
|
||||
if (action == StorageAction.TAKESNAPSHOT) {
|
||||
SnapshotInfo snapshotInfo = (SnapshotInfo)object;
|
||||
if (snapshotInfo.getHypervisorType() == Hypervisor.HypervisorType.KVM) {
|
||||
VolumeInfo volumeInfo = snapshotInfo.getBaseVolume();
|
||||
VirtualMachine vm = volumeInfo.getAttachedVM();
|
||||
if ((vm != null) && (vm.getState() == VirtualMachine.State.Running)) {
|
||||
Long hostId = vm.getHostId();
|
||||
return getEndPointFromHostId(hostId);
|
||||
switch (action) {
|
||||
case DELETESNAPSHOT:
|
||||
case TAKESNAPSHOT:
|
||||
case CONVERTSNAPSHOT: {
|
||||
SnapshotInfo snapshotInfo = (SnapshotInfo)object;
|
||||
if (Hypervisor.HypervisorType.KVM.equals(snapshotInfo.getHypervisorType())) {
|
||||
return getEndPointForSnapshotOperationsInKvm(snapshotInfo, encryptionRequired);
|
||||
}
|
||||
break;
|
||||
}
|
||||
} else if (action == StorageAction.MIGRATEVOLUME) {
|
||||
VolumeInfo volume = (VolumeInfo)object;
|
||||
if (volume.getHypervisorType() == Hypervisor.HypervisorType.Hyperv || volume.getHypervisorType() == Hypervisor.HypervisorType.VMware) {
|
||||
VirtualMachine vm = volume.getAttachedVM();
|
||||
if ((vm != null) && (vm.getState() == VirtualMachine.State.Running)) {
|
||||
Long hostId = vm.getHostId();
|
||||
return getEndPointFromHostId(hostId);
|
||||
}
|
||||
case REMOVEBITMAP: {
|
||||
return getEndPointForBitmapRemoval(object, encryptionRequired);
|
||||
}
|
||||
} else if (action == StorageAction.DELETEVOLUME) {
|
||||
VolumeInfo volume = (VolumeInfo)object;
|
||||
if (volume.getHypervisorType() == Hypervisor.HypervisorType.VMware) {
|
||||
VirtualMachine vm = volume.getAttachedVM();
|
||||
if (vm != null) {
|
||||
Long hostId = vm.getHostId() != null ? vm.getHostId() : vm.getLastHostId();
|
||||
if (hostId != null) {
|
||||
case MIGRATEVOLUME: {
|
||||
VolumeInfo volume = (VolumeInfo) object;
|
||||
if (volume.getHypervisorType() == Hypervisor.HypervisorType.Hyperv || volume.getHypervisorType() == Hypervisor.HypervisorType.VMware) {
|
||||
VirtualMachine vm = volume.getAttachedVM();
|
||||
if ((vm != null) && (vm.getState() == VirtualMachine.State.Running)) {
|
||||
Long hostId = vm.getHostId();
|
||||
return getEndPointFromHostId(hostId);
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
case DELETEVOLUME: {
|
||||
VolumeInfo volume = (VolumeInfo) object;
|
||||
if (volume.getHypervisorType() == Hypervisor.HypervisorType.VMware) {
|
||||
VirtualMachine vm = volume.getAttachedVM();
|
||||
if (vm != null) {
|
||||
Long hostId = vm.getHostId() != null ? vm.getHostId() : vm.getLastHostId();
|
||||
if (hostId != null) {
|
||||
return getEndPointFromHostId(hostId);
|
||||
}
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
        }
        return select(object, encryptionRequired);
    }

    protected EndPoint getEndPointForBitmapRemoval(DataObject object, boolean encryptionRequired) {
        SnapshotInfo snapshotInfo = (SnapshotInfo)object;
        VolumeInfo volumeInfo = snapshotInfo.getBaseVolume();

        logger.debug("Selecting endpoint for bitmap removal of volume [{}].", volumeInfo.getUuid());
        if (volumeInfo.isAttachedVM()) {
            VirtualMachine attachedVM = volumeInfo.getAttachedVM();
            if (attachedVM.getHostId() != null) {
                return getEndPointFromHostId(attachedVM.getHostId());
            } else if (attachedVM.getLastHostId() != null) {
                return getEndPointFromHostId(attachedVM.getLastHostId());
            }
        }
        return select(volumeInfo, encryptionRequired);
    }

    protected EndPoint getEndPointForSnapshotOperationsInKvm(SnapshotInfo snapshotInfo, boolean encryptionRequired) {
        VolumeInfo volumeInfo = snapshotInfo.getBaseVolume();
        DataStoreRole snapshotDataStoreRole = snapshotInfo.getDataStore().getRole();
        VirtualMachine vm = volumeInfo.getAttachedVM();

        logger.debug("Selecting endpoint for operation on snapshot [{}] with encryptionRequired as [{}].", snapshotInfo, encryptionRequired);
        if (vm == null) {
            if (snapshotDataStoreRole == DataStoreRole.Image) {
                return selectRandom(snapshotInfo.getDataCenterId(), Hypervisor.HypervisorType.KVM);
            } else {
                return select(snapshotInfo, encryptionRequired);
            }
        }

        if (vm.getState() == VirtualMachine.State.Running) {
            return getEndPointFromHostId(vm.getHostId());
        }

        Long hostId = vm.getLastHostId();
        if (hostId != null) {
            return getEndPointFromHostId(hostId);
        } else if (snapshotDataStoreRole == DataStoreRole.Image) {
            return selectRandom(snapshotInfo.getDataCenterId(), Hypervisor.HypervisorType.KVM);
        }

        return select(snapshotInfo, encryptionRequired);
    }

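The fall-back order implemented above can be read as a tiny decision function; the sketch below is a self-contained restatement for illustration only (the string results are labels, and none of it is CloudStack API):

    // Selection order for KVM snapshot operations; nullable host ids stand in for the attached VM's state.
    static String chooseKvmSnapshotEndpoint(Long runningVmHostId, Long lastHostId, boolean snapshotOnImageStore) {
        if (runningVmHostId != null) {
            return "host running the attached VM";   // 1) live VM: its current host must handle the operation
        }
        if (lastHostId != null) {
            return "host that last ran the VM";      // 2) stopped VM: prefer its last host
        }
        if (snapshotOnImageStore) {
            return "any KVM host in the zone";       // 3) snapshot on secondary storage: any KVM host will do
        }
        return "default endpoint selection";         // 4) otherwise fall back to the generic selector
    }
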
|
||||
@Override
|
||||
public EndPoint select(Scope scope, Long storeId) {
|
||||
return findEndPointInScope(scope, findOneHostOnPrimaryStorage, storeId);
|
||||
}
|
||||
|
||||
@Override
|
||||
public EndPoint selectRandom(long zoneId, Hypervisor.HypervisorType hypervisorType) {
|
||||
List<HostVO> hostVOs = hostDao.listByDataCenterIdAndHypervisorType(zoneId, hypervisorType);
|
||||
|
||||
if (hostVOs.isEmpty()) {
|
||||
return null;
|
||||
}
|
||||
Collections.shuffle(hostVOs);
|
||||
return RemoteHostEndPoint.getHypervisorHostEndPoint(hostVOs.get(0));
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<EndPoint> selectAll(DataStore store) {
|
||||
List<EndPoint> endPoints = new ArrayList<EndPoint>();
|
||||
|
|
|
|||
|
|
@ -29,13 +29,16 @@ import javax.inject.Inject;
|
|||
import com.cloud.dc.dao.ClusterDao;
|
||||
import org.apache.cloudstack.annotation.AnnotationService;
|
||||
import org.apache.cloudstack.annotation.dao.AnnotationDao;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
|
||||
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
||||
|
|
@ -78,6 +81,8 @@ public class PrimaryDataStoreHelper {
|
|||
protected ClusterDao clusterDao;
|
||||
@Inject
|
||||
private AnnotationDao annotationDao;
|
||||
@Inject
|
||||
DataStoreProviderManager dataStoreProviderMgr;
|
||||
|
||||
public DataStore createPrimaryDataStore(PrimaryDataStoreParameters params) {
|
||||
if(params == null)
|
||||
|
|
@ -144,7 +149,17 @@ public class PrimaryDataStoreHelper {
|
|||
storageTags.add(tag);
|
||||
}
|
||||
}
|
||||
dataStoreVO = dataStoreDao.persist(dataStoreVO, details, storageTags, params.isTagARule());
|
||||
|
||||
boolean displayDetails = true;
|
||||
DataStoreProvider storeProvider = dataStoreProviderMgr.getDataStoreProvider(params.getProviderName());
|
||||
if (storeProvider != null) {
|
||||
DataStoreDriver storeDriver = storeProvider.getDataStoreDriver();
|
||||
if (storeDriver != null) {
|
||||
displayDetails = storeDriver.canDisplayDetails();
|
||||
}
|
||||
}
|
||||
|
||||
dataStoreVO = dataStoreDao.persist(dataStoreVO, details, storageTags, params.isTagARule(), displayDetails);
|
||||
return dataStoreMgr.getDataStore(dataStoreVO.getId(), DataStoreRole.Primary);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -17,13 +17,19 @@
|
|||
package org.apache.cloudstack.storage.allocator;
|
||||
|
||||
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
|
||||
import com.cloud.capacity.Capacity;
|
||||
import com.cloud.capacity.dao.CapacityDao;
|
||||
import com.cloud.deploy.DeploymentPlan;
|
||||
import com.cloud.deploy.DeploymentPlanner;
|
||||
import com.cloud.storage.Storage;
|
||||
import com.cloud.storage.StoragePool;
|
||||
import com.cloud.storage.dao.VolumeDao;
|
||||
import com.cloud.user.Account;
|
||||
import com.cloud.utils.Pair;
|
||||
import com.cloud.vm.DiskProfile;
|
||||
import com.cloud.vm.VirtualMachineProfile;
|
||||
import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService;
|
||||
import org.apache.cloudstack.framework.config.ConfigKey;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||
import org.junit.After;
|
||||
import org.junit.Assert;
|
||||
|
|
@ -34,14 +40,18 @@ import org.mockito.Mock;
|
|||
import org.mockito.Mockito;
|
||||
import org.mockito.junit.MockitoJUnitRunner;
|
||||
|
||||
import com.cloud.deploy.DeploymentPlan;
|
||||
import com.cloud.deploy.DeploymentPlanner;
|
||||
import com.cloud.storage.Storage;
|
||||
import com.cloud.storage.StoragePool;
|
||||
import com.cloud.storage.dao.VolumeDao;
|
||||
import com.cloud.user.Account;
|
||||
import com.cloud.vm.DiskProfile;
|
||||
import com.cloud.vm.VirtualMachineProfile;
|
||||
import java.lang.reflect.Field;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
@RunWith(MockitoJUnitRunner.class)
|
||||
public class AbstractStoragePoolAllocatorTest {
|
||||
|
|
@ -53,6 +63,10 @@ public class AbstractStoragePoolAllocatorTest {
|
|||
|
||||
@Mock
|
||||
Account account;
|
||||
|
||||
@Mock
|
||||
CapacityDao capacityDao;
|
||||
|
||||
private List<StoragePool> pools;
|
||||
|
||||
@Mock
|
||||
|
|
@ -73,7 +87,8 @@ public class AbstractStoragePoolAllocatorTest {
|
|||
}
|
||||
|
||||
@Test
|
||||
public void reorderStoragePoolsBasedOnAlgorithm_random() {
|
||||
public void reorderStoragePoolsBasedOnAlgorithm_random() throws Exception {
|
||||
overrideDefaultConfigValue( VolumeOrchestrationService.VolumeAllocationAlgorithm, "random");
|
||||
allocator.reorderStoragePoolsBasedOnAlgorithm(pools, plan, account);
|
||||
Mockito.verify(allocator, Mockito.times(0)).reorderPoolsByCapacity(plan, pools);
|
||||
Mockito.verify(allocator, Mockito.times(0)).reorderPoolsByNumberOfVolumes(plan, pools, account);
|
||||
|
|
@ -81,8 +96,8 @@ public class AbstractStoragePoolAllocatorTest {
|
|||
}
|
||||
|
||||
@Test
|
||||
public void reorderStoragePoolsBasedOnAlgorithm_userdispersing() {
|
||||
allocator.allocationAlgorithm = "userdispersing";
|
||||
public void reorderStoragePoolsBasedOnAlgorithm_userdispersing() throws Exception {
|
||||
overrideDefaultConfigValue(VolumeOrchestrationService.VolumeAllocationAlgorithm, "userdispersing");
|
||||
Mockito.doReturn(pools).when(allocator).reorderPoolsByNumberOfVolumes(plan, pools, account);
|
||||
allocator.reorderStoragePoolsBasedOnAlgorithm(pools, plan, account);
|
||||
Mockito.verify(allocator, Mockito.times(0)).reorderPoolsByCapacity(plan, pools);
|
||||
|
|
@ -91,10 +106,9 @@ public class AbstractStoragePoolAllocatorTest {
|
|||
}
|
||||
|
||||
@Test
|
||||
public void reorderStoragePoolsBasedOnAlgorithm_userdispersing_reorder_check() {
|
||||
allocator.allocationAlgorithm = "userdispersing";
|
||||
public void reorderStoragePoolsBasedOnAlgorithm_userdispersing_reorder_check() throws Exception {
|
||||
overrideDefaultConfigValue(VolumeOrchestrationService.VolumeAllocationAlgorithm, "userdispersing");
|
||||
allocator.volumeDao = volumeDao;
|
||||
|
||||
when(plan.getDataCenterId()).thenReturn(1l);
|
||||
when(plan.getPodId()).thenReturn(1l);
|
||||
when(plan.getClusterId()).thenReturn(1l);
|
||||
|
|
@ -114,8 +128,8 @@ public class AbstractStoragePoolAllocatorTest {
|
|||
}
|
||||
|
||||
@Test
|
||||
public void reorderStoragePoolsBasedOnAlgorithm_firstfitleastconsumed() {
|
||||
allocator.allocationAlgorithm = "firstfitleastconsumed";
|
||||
public void reorderStoragePoolsBasedOnAlgorithm_firstfitleastconsumed() throws Exception {
|
||||
overrideDefaultConfigValue(VolumeOrchestrationService.VolumeAllocationAlgorithm, "firstfitleastconsumed");
|
||||
Mockito.doReturn(pools).when(allocator).reorderPoolsByCapacity(plan, pools);
|
||||
allocator.reorderStoragePoolsBasedOnAlgorithm(pools, plan, account);
|
||||
Mockito.verify(allocator, Mockito.times(1)).reorderPoolsByCapacity(plan, pools);
|
||||
|
|
@ -132,6 +146,34 @@ public class AbstractStoragePoolAllocatorTest {
|
|||
}
|
||||
Assert.assertTrue(firstchoice.size() > 2);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void reorderStoragePoolsBasedOnAlgorithmFirstFitLeastConsumed() throws Exception {
|
||||
overrideDefaultConfigValue(VolumeOrchestrationService.VolumeAllocationAlgorithm, "firstfitleastconsumed");
|
||||
when(plan.getDataCenterId()).thenReturn(1L);
|
||||
when(plan.getClusterId()).thenReturn(1L);
|
||||
StoragePool pool1 = mock(StoragePool.class);
|
||||
StoragePool pool2 = mock(StoragePool.class);
|
||||
when(pool1.getId()).thenReturn(1L);
|
||||
when(pool2.getId()).thenReturn(2L);
|
||||
List<StoragePool> pools = Arrays.asList(pool1, pool2);
|
||||
List<Long> poolIds = Arrays.asList(2L, 1L);
|
||||
Map<Long, Double> hostCapacityMap = new HashMap<>();
|
||||
hostCapacityMap.put(1L, 8.0);
|
||||
hostCapacityMap.put(2L, 8.5);
|
||||
Pair<List<Long>, Map<Long, Double>> poolsOrderedByCapacity = new Pair<>(poolIds, hostCapacityMap);
|
||||
|
||||
allocator.capacityDao = capacityDao;
|
||||
Mockito.when(capacityDao.orderHostsByFreeCapacity(1L, 1L, Capacity.CAPACITY_TYPE_LOCAL_STORAGE)).thenReturn(poolsOrderedByCapacity);
|
||||
List<StoragePool> result = allocator.reorderPoolsByCapacity(plan, pools);
|
||||
assertEquals(Arrays.asList(pool2, pool1), result);
|
||||
}
|
||||
|
||||
private void overrideDefaultConfigValue(final ConfigKey configKey, final String value) throws IllegalAccessException, NoSuchFieldException {
|
||||
final Field f = ConfigKey.class.getDeclaredField("_defaultValue");
|
||||
f.setAccessible(true);
|
||||
f.set(configKey, value);
|
||||
}
|
||||
}
|
||||
|
||||
class MockStorapoolAllocater extends AbstractStoragePoolAllocator {
|
||||
|||
|
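The allocation-algorithm tests above no longer set allocator.allocationAlgorithm directly; they flip the default of the VolumeAllocationAlgorithm ConfigKey through reflection (overrideDefaultConfigValue). A minimal, self-contained sketch of that reflection trick follows; the Config class and field name are stand-ins for CloudStack's ConfigKey, shown only to make the mechanism concrete.

    import java.lang.reflect.Field;

    public class ReflectionOverrideSketch {
        // Stand-in for a config holder whose default value has no public setter.
        static class Config {
            private String _defaultValue;
            Config(String defaultValue) { this._defaultValue = defaultValue; }
            String defaultValue() { return _defaultValue; }
        }

        // Mirrors what overrideDefaultConfigValue() does for ConfigKey in the test:
        // reach into the private field and swap the default.
        static void overrideDefault(Config config, String value) throws Exception {
            Field f = Config.class.getDeclaredField("_defaultValue");
            f.setAccessible(true);
            f.set(config, value);
        }

        public static void main(String[] args) throws Exception {
            Config algorithm = new Config("random");
            overrideDefault(algorithm, "userdispersing");
            System.out.println(algorithm.defaultValue()); // prints "userdispersing"
        }
    }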
|
@ -0,0 +1,71 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.cloudstack.storage.allocator;
|
||||
|
||||
import com.cloud.capacity.Capacity;
|
||||
import com.cloud.capacity.dao.CapacityDao;
|
||||
import com.cloud.deploy.DeploymentPlan;
|
||||
import com.cloud.storage.Storage;
|
||||
import com.cloud.storage.StoragePool;
|
||||
import com.cloud.utils.Pair;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.mockito.Mockito;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
public class ZoneWideStoragePoolAllocatorTest {
|
||||
private ZoneWideStoragePoolAllocator allocator;
|
||||
private DeploymentPlan plan;
|
||||
|
||||
@Before
|
||||
public void setUp() {
|
||||
allocator = new ZoneWideStoragePoolAllocator();
|
||||
plan = mock(DeploymentPlan.class);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testReorderPoolsByCapacity() {
|
||||
when(plan.getDataCenterId()).thenReturn(1L);
|
||||
when(plan.getClusterId()).thenReturn(null);
|
||||
StoragePool pool1 = mock(StoragePool.class);
|
||||
StoragePool pool2 = mock(StoragePool.class);
|
||||
when(pool1.getPoolType()).thenReturn(Storage.StoragePoolType.Filesystem);
|
||||
when(pool1.getId()).thenReturn(1L);
|
||||
when(pool2.getId()).thenReturn(2L);
|
||||
List<StoragePool> pools = Arrays.asList(pool1, pool2);
|
||||
List<Long> poolIds = Arrays.asList(2L, 1L);
|
||||
Map<Long, Double> hostCapacityMap = new HashMap<>();
|
||||
hostCapacityMap.put(1L, 8.0);
|
||||
hostCapacityMap.put(2L, 8.5);
|
||||
Pair<List<Long>, Map<Long, Double>> poolsOrderedByCapacity = new Pair<>(poolIds, hostCapacityMap);
|
||||
CapacityDao capacityDao = mock(CapacityDao.class);
|
||||
Mockito.when(capacityDao.orderHostsByFreeCapacity(1L, null, Capacity.CAPACITY_TYPE_LOCAL_STORAGE)).thenReturn(poolsOrderedByCapacity);
|
||||
allocator.capacityDao = capacityDao;
|
||||
List<StoragePool> result = allocator.reorderPoolsByCapacity(plan, pools);
|
||||
assertEquals(Arrays.asList(pool2, pool1), result);
|
||||
}
|
||||
}
|
||||
|
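The new ZoneWideStoragePoolAllocatorTest (and the matching AbstractStoragePoolAllocatorTest case) pins down the contract of reorderPoolsByCapacity: CapacityDao returns pool IDs already ordered by free capacity, and the allocator must rearrange its StoragePool list to match that order. A hedged sketch of that reordering step with plain JDK types; Pool and reorderByCapacity here are illustrative stand-ins, not CloudStack's API.

    import java.util.*;
    import java.util.stream.Collectors;

    public class ReorderByCapacitySketch {
        record Pool(long id, String name) {}

        // Rearranges 'pools' to follow the id order returned by the capacity query;
        // ids without a matching pool are simply skipped.
        static List<Pool> reorderByCapacity(List<Long> idsOrderedByFreeCapacity, List<Pool> pools) {
            Map<Long, Pool> byId = pools.stream().collect(Collectors.toMap(Pool::id, p -> p));
            return idsOrderedByFreeCapacity.stream()
                    .map(byId::get)
                    .filter(Objects::nonNull)
                    .collect(Collectors.toList());
        }

        public static void main(String[] args) {
            List<Pool> pools = List.of(new Pool(1, "pool1"), new Pool(2, "pool2"));
            // The capacity query listed pool 2 first because it has more free space.
            System.out.println(reorderByCapacity(List.of(2L, 1L), pools));
            // [Pool[id=2, name=pool2], Pool[id=1, name=pool1]]
        }
    }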
|
@ -88,7 +88,7 @@ public class SnapshotDataStoreDaoImplTest {
snapshotDataStoreDaoImplSpy.snapshotVOSearch = searchBuilderMock;
Mockito.doReturn(searchCriteriaMock).when(searchBuilderMock).create();
Mockito.doReturn(null).when(snapshotDaoMock).findOneBy(Mockito.any());
Assert.assertFalse(snapshotDataStoreDaoImplSpy.isSnapshotChainingRequired(2));
Assert.assertFalse(snapshotDataStoreDaoImplSpy.isSnapshotChainingRequired(2, false));
}
|
||||
|
||||
@Test
|
||||
|
|
@ -99,7 +99,7 @@ public class SnapshotDataStoreDaoImplTest {
|
|||
|
||||
for (Hypervisor.HypervisorType hypervisorType : Hypervisor.HypervisorType.values()) {
|
||||
Mockito.doReturn(hypervisorType).when(snapshotVoMock).getHypervisorType();
|
||||
boolean result = snapshotDataStoreDaoImplSpy.isSnapshotChainingRequired(2);
|
||||
boolean result = snapshotDataStoreDaoImplSpy.isSnapshotChainingRequired(2, false);
|
||||
|
||||
if (SnapshotDataStoreDaoImpl.HYPERVISORS_SUPPORTING_SNAPSHOTS_CHAINING.contains(hypervisorType)) {
|
||||
Assert.assertTrue(result);
|
||||
|
|||
|
|
@ -0,0 +1,200 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package org.apache.cloudstack.storage.endpoint;
|
||||
|
||||
|
||||
import com.cloud.hypervisor.Hypervisor;
|
||||
import com.cloud.storage.DataStoreRole;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.Mockito;
|
||||
import org.mockito.Spy;
|
||||
import org.mockito.junit.MockitoJUnitRunner;
|
||||
|
||||
@RunWith(MockitoJUnitRunner.class)
|
||||
public class DefaultEndPointSelectorTest {
|
||||
|
||||
@Mock
|
||||
private VirtualMachine virtualMachineMock;
|
||||
|
||||
@Mock
|
||||
private VolumeInfo volumeInfoMock;
|
||||
|
||||
@Mock
|
||||
private SnapshotInfo snapshotInfoMock;
|
||||
|
||||
@Mock
|
||||
private DataStore datastoreMock;
|
||||
|
||||
@Spy
|
||||
private DefaultEndPointSelector defaultEndPointSelectorSpy;
|
||||
|
||||
@Before
|
||||
public void setup() {
|
||||
Mockito.doReturn(volumeInfoMock).when(snapshotInfoMock).getBaseVolume();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getEndPointForBitmapRemovalTestVolumeIsNotAttached() {
|
||||
Mockito.doReturn(false).when(volumeInfoMock).isAttachedVM();
|
||||
Mockito.doReturn(null).when(defaultEndPointSelectorSpy).select(volumeInfoMock, false);
|
||||
|
||||
defaultEndPointSelectorSpy.getEndPointForBitmapRemoval(snapshotInfoMock, false);
|
||||
|
||||
Mockito.verify(defaultEndPointSelectorSpy, Mockito.times(1)).select(volumeInfoMock, false);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getEndPointForBitmapRemovalTestVolumeIsAttachedHostIdIsSet() {
|
||||
Mockito.doReturn(true).when(volumeInfoMock).isAttachedVM();
|
||||
Mockito.doReturn(virtualMachineMock).when(volumeInfoMock).getAttachedVM();
|
||||
long hostId = 12L;
|
||||
Mockito.doReturn(hostId).when(virtualMachineMock).getHostId();
|
||||
|
||||
Mockito.doReturn(null).when(defaultEndPointSelectorSpy).getEndPointFromHostId(hostId);
|
||||
|
||||
defaultEndPointSelectorSpy.getEndPointForBitmapRemoval(snapshotInfoMock, false);
|
||||
|
||||
Mockito.verify(defaultEndPointSelectorSpy, Mockito.times(1)).getEndPointFromHostId(hostId);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getEndPointForBitmapRemovalTestVolumeIsAttachedLastHostIdIsSet() {
|
||||
Mockito.doReturn(true).when(volumeInfoMock).isAttachedVM();
|
||||
Mockito.doReturn(virtualMachineMock).when(volumeInfoMock).getAttachedVM();
|
||||
|
||||
Mockito.doReturn(null).when(virtualMachineMock).getHostId();
|
||||
long lastHostId = 13L;
|
||||
Mockito.doReturn(lastHostId).when(virtualMachineMock).getLastHostId();
|
||||
|
||||
Mockito.doReturn(null).when(defaultEndPointSelectorSpy).getEndPointFromHostId(lastHostId);
|
||||
|
||||
defaultEndPointSelectorSpy.getEndPointForBitmapRemoval(snapshotInfoMock, false);
|
||||
|
||||
Mockito.verify(defaultEndPointSelectorSpy, Mockito.times(1)).getEndPointFromHostId(lastHostId);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getEndPointForBitmapRemovalTestVolumeIsAttachedNoHostIsSet() {
|
||||
Mockito.doReturn(true).when(volumeInfoMock).isAttachedVM();
|
||||
Mockito.doReturn(virtualMachineMock).when(volumeInfoMock).getAttachedVM();
|
||||
|
||||
Mockito.doReturn(null).when(virtualMachineMock).getHostId();
|
||||
Mockito.doReturn(null).when(virtualMachineMock).getLastHostId();
|
||||
|
||||
Mockito.doReturn(null).when(defaultEndPointSelectorSpy).select(volumeInfoMock, false);
|
||||
|
||||
defaultEndPointSelectorSpy.getEndPointForBitmapRemoval(snapshotInfoMock, false);
|
||||
|
||||
Mockito.verify(defaultEndPointSelectorSpy, Mockito.times(1)).select(volumeInfoMock, false);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getEndPointForSnapshotOperationsInKvmTestVolumeIsNotAttachedToVMAndSnapshotOnPrimary() {
|
||||
Mockito.doReturn(null).when(volumeInfoMock).getAttachedVM();
|
||||
Mockito.doReturn(datastoreMock).when(snapshotInfoMock).getDataStore();
|
||||
Mockito.doReturn(DataStoreRole.Primary).when(datastoreMock).getRole();
|
||||
Mockito.doReturn(null).when(defaultEndPointSelectorSpy).select(snapshotInfoMock, false);
|
||||
|
||||
defaultEndPointSelectorSpy.getEndPointForSnapshotOperationsInKvm(snapshotInfoMock, false);
|
||||
|
||||
Mockito.verify(defaultEndPointSelectorSpy, Mockito.times(1)).select(snapshotInfoMock, false);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getEndPointForSnapshotOperationsInKvmTestVolumeIsNotAttachedToVMAndSnapshotOnSecondary() {
|
||||
Mockito.doReturn(null).when(volumeInfoMock).getAttachedVM();
|
||||
Mockito.doReturn(datastoreMock).when(snapshotInfoMock).getDataStore();
|
||||
Mockito.doReturn(DataStoreRole.Image).when(datastoreMock).getRole();
|
||||
long zoneId = 1L;
|
||||
Mockito.doReturn(zoneId).when(snapshotInfoMock).getDataCenterId();
|
||||
|
||||
Mockito.doReturn(null).when(defaultEndPointSelectorSpy).selectRandom(zoneId, Hypervisor.HypervisorType.KVM);
|
||||
|
||||
defaultEndPointSelectorSpy.getEndPointForSnapshotOperationsInKvm(snapshotInfoMock, false);
|
||||
|
||||
Mockito.verify(defaultEndPointSelectorSpy, Mockito.times(1)).selectRandom(zoneId, Hypervisor.HypervisorType.KVM);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getEndPointForSnapshotOperationsInKvmTestVolumeAttachedToRunningVm() {
|
||||
Mockito.doReturn(virtualMachineMock).when(volumeInfoMock).getAttachedVM();
|
||||
Mockito.doReturn(datastoreMock).when(snapshotInfoMock).getDataStore();
|
||||
Mockito.doReturn(VirtualMachine.State.Running).when(virtualMachineMock).getState();
|
||||
long hostId = 12L;
|
||||
Mockito.doReturn(hostId).when(virtualMachineMock).getHostId();
|
||||
|
||||
Mockito.doReturn(null).when(defaultEndPointSelectorSpy).getEndPointFromHostId(hostId);
|
||||
|
||||
defaultEndPointSelectorSpy.getEndPointForSnapshotOperationsInKvm(snapshotInfoMock, false);
|
||||
|
||||
Mockito.verify(defaultEndPointSelectorSpy, Mockito.times(1)).getEndPointFromHostId(hostId);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getEndPointForSnapshotOperationsInKvmTestVolumeAttachedToStoppedVmAndLastHostIdIsSet() {
|
||||
Mockito.doReturn(virtualMachineMock).when(volumeInfoMock).getAttachedVM();
|
||||
Mockito.doReturn(datastoreMock).when(snapshotInfoMock).getDataStore();
|
||||
Mockito.doReturn(VirtualMachine.State.Stopped).when(virtualMachineMock).getState();
|
||||
long hostId = 13L;
|
||||
Mockito.doReturn(hostId).when(virtualMachineMock).getLastHostId();
|
||||
|
||||
Mockito.doReturn(null).when(defaultEndPointSelectorSpy).getEndPointFromHostId(hostId);
|
||||
|
||||
defaultEndPointSelectorSpy.getEndPointForSnapshotOperationsInKvm(snapshotInfoMock, false);
|
||||
|
||||
Mockito.verify(defaultEndPointSelectorSpy, Mockito.times(1)).getEndPointFromHostId(hostId);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getEndPointForSnapshotOperationsInKvmTestVolumeAttachedToStoppedVmAndLastHostIdIsNotSetAndSnapshotIsOnSecondary() {
|
||||
Mockito.doReturn(virtualMachineMock).when(volumeInfoMock).getAttachedVM();
|
||||
Mockito.doReturn(datastoreMock).when(snapshotInfoMock).getDataStore();
|
||||
Mockito.doReturn(DataStoreRole.Image).when(datastoreMock).getRole();
|
||||
Mockito.doReturn(VirtualMachine.State.Stopped).when(virtualMachineMock).getState();
|
||||
Mockito.doReturn(null).when(virtualMachineMock).getLastHostId();
|
||||
long zoneId = 1L;
|
||||
Mockito.doReturn(zoneId).when(snapshotInfoMock).getDataCenterId();
|
||||
Mockito.doReturn(null).when(defaultEndPointSelectorSpy).selectRandom(zoneId, Hypervisor.HypervisorType.KVM);
|
||||
|
||||
defaultEndPointSelectorSpy.getEndPointForSnapshotOperationsInKvm(snapshotInfoMock, false);
|
||||
|
||||
Mockito.verify(defaultEndPointSelectorSpy, Mockito.times(1)).selectRandom(zoneId, Hypervisor.HypervisorType.KVM);
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void getEndPointForSnapshotOperationsInKvmTestVolumeAttachedToStoppedVmAndLastHostIdIsNotSetAndSnapshotIsOnPrimary() {
|
||||
Mockito.doReturn(virtualMachineMock).when(volumeInfoMock).getAttachedVM();
|
||||
Mockito.doReturn(datastoreMock).when(snapshotInfoMock).getDataStore();
|
||||
Mockito.doReturn(DataStoreRole.Primary).when(datastoreMock).getRole();
|
||||
Mockito.doReturn(VirtualMachine.State.Stopped).when(virtualMachineMock).getState();
|
||||
Mockito.doReturn(null).when(virtualMachineMock).getLastHostId();
|
||||
Mockito.doReturn(null).when(defaultEndPointSelectorSpy).select(snapshotInfoMock, false);
|
||||
|
||||
defaultEndPointSelectorSpy.getEndPointForSnapshotOperationsInKvm(snapshotInfoMock, false);
|
||||
|
||||
Mockito.verify(defaultEndPointSelectorSpy, Mockito.times(1)).select(snapshotInfoMock, false);
|
||||
}
|
||||
|
||||
}
|
||||
|
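Taken together, the DefaultEndPointSelectorTest cases describe a decision tree for picking the agent endpoint of a KVM snapshot operation: prefer the host of the attached running VM, fall back to the last host of a stopped VM, and otherwise route by where the snapshot lives (a random KVM host in the zone when it sits on secondary storage, the regular selector when it sits on primary). A condensed sketch of that branching with stand-in types, not the actual DefaultEndPointSelector API.

    public class SnapshotEndpointSelectionSketch {
        enum StoreRole { PRIMARY, IMAGE }
        enum VmState { RUNNING, STOPPED }

        record Vm(VmState state, Long hostId, Long lastHostId) {}
        record Snapshot(StoreRole storeRole, long zoneId, Vm attachedVm) {}

        // Returns a description of the endpoint that would be chosen; the real code
        // resolves an EndPoint from a host id or delegates to the selector.
        static String pickEndpoint(Snapshot snapshot) {
            Vm vm = snapshot.attachedVm();
            if (vm != null) {
                if (vm.state() == VmState.RUNNING && vm.hostId() != null) {
                    return "host " + vm.hostId();            // running VM: use its current host
                }
                if (vm.state() == VmState.STOPPED && vm.lastHostId() != null) {
                    return "last host " + vm.lastHostId();   // stopped VM: use its last host
                }
            }
            if (snapshot.storeRole() == StoreRole.IMAGE) {
                return "random KVM host in zone " + snapshot.zoneId(); // snapshot on secondary
            }
            return "endpoint selected from the snapshot's primary store"; // snapshot on primary
        }

        public static void main(String[] args) {
            System.out.println(pickEndpoint(new Snapshot(StoreRole.IMAGE, 1L, null)));
            System.out.println(pickEndpoint(new Snapshot(StoreRole.PRIMARY, 1L, new Vm(VmState.RUNNING, 12L, null))));
        }
    }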
|
@ -24,9 +24,11 @@ import com.cloud.configuration.Resource.ResourceType;
|
|||
import com.cloud.dc.VsphereStoragePolicyVO;
|
||||
import com.cloud.dc.dao.VsphereStoragePolicyDao;
|
||||
import com.cloud.storage.StorageManager;
|
||||
import com.cloud.utils.Pair;
|
||||
import com.cloud.utils.db.Transaction;
|
||||
import com.cloud.utils.db.TransactionCallbackNoReturn;
|
||||
import com.cloud.utils.db.TransactionStatus;
|
||||
import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService;
|
||||
import org.apache.cloudstack.secret.dao.PassphraseDao;
|
||||
import org.apache.cloudstack.secret.PassphraseVO;
|
||||
import com.cloud.service.dao.ServiceOfferingDetailsDao;
|
||||
|
|
@ -82,6 +84,7 @@ import java.util.Arrays;
|
|||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.function.Function;
|
||||
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
|
||||
|
||||
|
|
@ -114,6 +117,9 @@ public class VolumeObject implements VolumeInfo {
|
|||
VsphereStoragePolicyDao vsphereStoragePolicyDao;
|
||||
@Inject
|
||||
PassphraseDao passphraseDao;
|
||||
@Inject
|
||||
VolumeOrchestrationService
|
||||
orchestrationService;
|
||||
|
||||
private Object payload;
|
||||
private MigrationOptions migrationOptions;
|
||||
|
|
@ -121,6 +127,9 @@ public class VolumeObject implements VolumeInfo {
|
|||
private String vSphereStoragePolicyId;
|
||||
private boolean followRedirects;
|
||||
|
||||
private List<String> checkpointPaths;
|
||||
private Set<String> checkpointImageStoreUrls;
|
||||
|
||||
private final List<Volume.State> volumeStatesThatShouldNotTransitWhenDataStoreRoleIsImage = Arrays.asList(Volume.State.Migrating, Volume.State.Uploaded, Volume.State.Copying,
|
||||
Volume.State.Expunged);
|
||||
|
||||
|
|
@ -136,6 +145,9 @@ public class VolumeObject implements VolumeInfo {
|
|||
protected void configure(DataStore dataStore, VolumeVO volumeVO) {
|
||||
this.volumeVO = volumeVO;
|
||||
this.dataStore = dataStore;
|
||||
Pair<List<String>, Set<String>> volumeCheckPointPathsAndImageStoreUrls = orchestrationService.getVolumeCheckpointPathsAndImageStoreUrls(volumeVO.getId(), getHypervisorType());
|
||||
this.checkpointPaths = volumeCheckPointPathsAndImageStoreUrls.first();
|
||||
this.checkpointImageStoreUrls = volumeCheckPointPathsAndImageStoreUrls.second();
|
||||
}
|
||||
|
||||
public static VolumeObject getVolumeObject(DataStore dataStore, VolumeVO volumeVO) {
|
||||
|
|
@ -945,6 +957,16 @@ public class VolumeObject implements VolumeInfo {
|
|||
return followRedirects;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<String> getCheckpointPaths() {
|
||||
return checkpointPaths;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Set<String> getCheckpointImageStoreUrls() {
|
||||
return checkpointImageStoreUrls;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return String.format("VolumeObject %s",
|
||||
|
|
|
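VolumeObject now carries the checkpoint XML paths and the image-store URLs they are kept on (populated in configure() via the orchestration service), and the KVM resource later consumes both to remount the secondary storages and redefine each checkpoint. A schematic consumer with stand-in types; the URLs, VM name, and interfaces below are illustrative only.

    import java.util.List;
    import java.util.Set;

    public class CheckpointMetadataConsumerSketch {
        record VolumeCheckpoints(List<String> checkpointPaths, Set<String> imageStoreUrls) {}

        interface StorageConnector { void connect(String imageStoreUrl); void disconnect(String imageStoreUrl); }
        interface CheckpointRedefiner { void redefine(String vmName, String checkpointXmlPath); }

        static void recreate(String vmName, VolumeCheckpoints volume, StorageConnector storage, CheckpointRedefiner virsh) {
            // Mount every secondary storage that holds a checkpoint XML for this volume...
            volume.imageStoreUrls().forEach(storage::connect);
            try {
                // ...then redefine the checkpoints in the stored order.
                volume.checkpointPaths().forEach(path -> virsh.redefine(vmName, path));
            } finally {
                volume.imageStoreUrls().forEach(storage::disconnect);
            }
        }

        public static void main(String[] args) {
            VolumeCheckpoints volume = new VolumeCheckpoints(
                    List.of("/mnt/sec/snapshots/2/10/checkpoint-1.xml"),
                    Set.of("nfs://sec-storage/export/secondary"));
            recreate("i-2-10-VM", volume,
                    new StorageConnector() {
                        public void connect(String url) { System.out.println("connect " + url); }
                        public void disconnect(String url) { System.out.println("disconnect " + url); }
                    },
                    (vm, path) -> System.out.println("virsh checkpoint-create --domain " + vm + " --xmlfile " + path + " --redefine"));
        }
    }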
|||
|
|
@ -497,6 +497,9 @@ public class VolumeServiceImpl implements VolumeService {
|
|||
long storagePoolId = snapStoreVo.getDataStoreId();
|
||||
StoragePoolVO storagePoolVO = storagePoolDao.findById(storagePoolId);
|
||||
|
||||
if (StoragePoolType.StorPool.equals(storagePoolVO.getPoolType())) {
|
||||
continue;
|
||||
}
|
||||
if (storagePoolVO.isManaged()) {
|
||||
DataStore primaryDataStore = dataStoreMgr.getPrimaryDataStore(storagePoolId);
|
||||
Map<String, String> mapCapabilities = primaryDataStore.getDriver().getCapabilities();
|
||||
|
|
@ -507,10 +510,10 @@ public class VolumeServiceImpl implements VolumeService {
|
|||
if (!supportsStorageSystemSnapshots) {
|
||||
_snapshotStoreDao.remove(snapStoreVo.getId());
|
||||
}
|
||||
} else if (HypervisorType.KVM.equals(vo.getHypervisorType())) {
|
||||
deleteKvmSnapshotOnPrimary(snapStoreVo);
|
||||
} else {
|
||||
if (!StoragePoolType.StorPool.equals(storagePoolVO.getPoolType())) {
|
||||
_snapshotStoreDao.remove(snapStoreVo.getId());
|
||||
}
|
||||
_snapshotStoreDao.remove(snapStoreVo.getId());
|
||||
}
|
||||
}
|
||||
snapshotApiService.markVolumeSnapshotsAsDestroyed(vo);
|
||||
|
|
@ -525,6 +528,21 @@ public class VolumeServiceImpl implements VolumeService {
|
|||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Deletes the snapshot from primary storage when all of its store references have the Primary role; otherwise, only removes the primary store record from the database.
|
||||
* */
|
||||
protected void deleteKvmSnapshotOnPrimary(SnapshotDataStoreVO snapshotDataStoreVO) {
|
||||
List<SnapshotDataStoreVO> snapshotDataStoreVOList = _snapshotStoreDao.findBySnapshotId(snapshotDataStoreVO.getSnapshotId());
|
||||
for (SnapshotDataStoreVO snapshotStore : snapshotDataStoreVOList) {
|
||||
if (DataStoreRole.Image.equals(snapshotStore.getRole())) {
|
||||
_snapshotStoreDao.remove(snapshotDataStoreVO.getId());
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
snapshotApiService.deleteSnapshot(snapshotDataStoreVO.getSnapshotId(), null);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean cloneVolume(long volumeId, long baseVolId) {
|
||||
// TODO Auto-generated method stub
|
||||
|
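deleteKvmSnapshotOnPrimary only drops the primary-store record when the snapshot still has an image-store copy; with no secondary backup left, the snapshot itself is deleted. A stand-alone sketch of that check; the repository and service interfaces are placeholders, not CloudStack's DAOs.

    import java.util.List;

    public class KvmPrimarySnapshotCleanupSketch {
        enum Role { PRIMARY, IMAGE }
        record SnapshotStoreRef(long refId, long snapshotId, Role role) {}

        interface SnapshotStoreRepo { List<SnapshotStoreRef> findBySnapshotId(long snapshotId); void remove(long refId); }
        interface SnapshotService { void deleteSnapshot(long snapshotId); }

        static void deleteKvmSnapshotOnPrimary(SnapshotStoreRef primaryRef, SnapshotStoreRepo repo, SnapshotService service) {
            // If any image-store copy still exists, the snapshot survives: only drop the primary record.
            for (SnapshotStoreRef ref : repo.findBySnapshotId(primaryRef.snapshotId())) {
                if (ref.role() == Role.IMAGE) {
                    repo.remove(primaryRef.refId());
                    return;
                }
            }
            // No backup on secondary storage: remove the snapshot entirely.
            service.deleteSnapshot(primaryRef.snapshotId());
        }

        public static void main(String[] args) {
            SnapshotStoreRef primary = new SnapshotStoreRef(10L, 7L, Role.PRIMARY);
            SnapshotStoreRepo repo = new SnapshotStoreRepo() {
                public List<SnapshotStoreRef> findBySnapshotId(long id) {
                    return List.of(primary, new SnapshotStoreRef(11L, 7L, Role.IMAGE));
                }
                public void remove(long refId) { System.out.println("removed primary store ref " + refId); }
            };
            deleteKvmSnapshotOnPrimary(primary, repo, id -> System.out.println("deleted snapshot " + id));
        }
    }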
|||
|
|
@ -21,12 +21,14 @@ package org.apache.cloudstack.storage.volume;
|
|||
|
||||
import com.cloud.agent.api.storage.DownloadAnswer;
|
||||
import com.cloud.exception.ConcurrentOperationException;
|
||||
import com.cloud.hypervisor.Hypervisor;
|
||||
import com.cloud.storage.DataStoreRole;
|
||||
import com.cloud.storage.DiskOfferingVO;
|
||||
import com.cloud.storage.Storage;
|
||||
import com.cloud.storage.Volume;
|
||||
import com.cloud.storage.VolumeVO;
|
||||
import com.cloud.storage.dao.VolumeDao;
|
||||
import com.cloud.utils.Pair;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.cloud.utils.fsm.NoTransitionException;
|
||||
import java.util.Arrays;
|
||||
|
|
@ -38,6 +40,7 @@ import java.util.Map;
|
|||
import java.util.Set;
|
||||
import java.util.function.Function;
|
||||
import junit.framework.TestCase;
|
||||
import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
|
||||
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
|
||||
|
|
@ -82,6 +85,9 @@ public class VolumeObjectTest extends TestCase{
|
|||
@Mock
|
||||
ObjectInDataStoreManager objectInDataStoreManagerMock;
|
||||
|
||||
@Mock
|
||||
VolumeOrchestrationService orchestrationServiceMock;
|
||||
|
||||
Set<Function<DiskOfferingVO, Long>> diskOfferingVoMethodsWithLongReturn = new HashSet<>();
|
||||
|
||||
List<ObjectInDataStoreStateMachine.Event> objectInDataStoreStateMachineEvents = Arrays.asList(ObjectInDataStoreStateMachine.Event.values());
|
||||
|
|
@ -90,6 +96,9 @@ public class VolumeObjectTest extends TestCase{
|
|||
|
||||
@Before
|
||||
public void setup(){
|
||||
volumeObjectSpy.orchestrationService = orchestrationServiceMock;
|
||||
Mockito.doReturn(new Pair<>(List.of(), Set.of())).when(orchestrationServiceMock).getVolumeCheckpointPathsAndImageStoreUrls(Mockito.anyLong(), Mockito.any());
|
||||
Mockito.doReturn(Hypervisor.HypervisorType.KVM).when(volumeObjectSpy).getHypervisorType();
|
||||
volumeObjectSpy.configure(dataStoreMock, volumeVoMock);
|
||||
volumeObjectSpy.volumeStoreDao = volumeDataStoreDaoMock;
|
||||
volumeObjectSpy.volumeDao = volumeDaoMock;
|
||||
|
|
@ -362,7 +371,8 @@ public class VolumeObjectTest extends TestCase{
|
|||
volumeObjectTo.setFormat(null);
|
||||
|
||||
volumeObjectSpy.setVolumeFormat(volumeObjectTo, false, volumeVoMock);
|
||||
Mockito.verifyNoInteractions(volumeVoMock);
|
||||
|
||||
Mockito.verify(volumeVoMock, Mockito.never()).setFormat(Mockito.any());
|
||||
}
|
||||
|
||||
@Test
|
||||
|
|
@ -371,7 +381,8 @@ public class VolumeObjectTest extends TestCase{
|
|||
volumeObjectTo.setFormat(null);
|
||||
|
||||
volumeObjectSpy.setVolumeFormat(volumeObjectTo, true, volumeVoMock);
|
||||
Mockito.verifyNoInteractions(volumeVoMock);
|
||||
|
||||
Mockito.verify(volumeVoMock, Mockito.never()).setFormat(Mockito.any());
|
||||
}
|
||||
|
||||
@Test
|
||||
|
|
@ -385,7 +396,7 @@ public class VolumeObjectTest extends TestCase{
|
|||
volumeObjectSpy.setVolumeFormat(volumeObjectTo, false, volumeVoMock);
|
||||
});
|
||||
|
||||
Mockito.verifyNoInteractions(volumeVoMock);
|
||||
Mockito.verify(volumeVoMock, Mockito.never()).setFormat(Mockito.any());
|
||||
}
|
||||
|
||||
@Test
|
||||
|
|
|
|||
|
|
@ -178,7 +178,8 @@ public class VolumeServiceTest extends TestCase{
|
|||
|
||||
@Test
|
||||
public void validateCopyPoliciesBetweenVolumesAndDestroySourceVolumeAfterMigrationReturnTrueOrFalse() throws ExecutionException, InterruptedException{
|
||||
VolumeObject volumeObject = new VolumeObject();
|
||||
VolumeObject volumeObject = Mockito.mock(VolumeObject.class);
|
||||
Mockito.doReturn(new VolumeVO() {}).when(volumeObject).getVolume();
|
||||
volumeObject.configure(null, new VolumeVO() {});
|
||||
|
||||
Mockito.doNothing().when(snapshotManagerMock).copySnapshotPoliciesBetweenVolumes(Mockito.any(), Mockito.any());
|
||||
|
|
@ -196,8 +197,8 @@ public class VolumeServiceTest extends TestCase{
|
|||
@Test (expected = Exception.class)
|
||||
public void validateCopyPoliciesBetweenVolumesAndDestroySourceVolumeAfterMigrationThrowAnyOtherException() throws
|
||||
ExecutionException, InterruptedException{
|
||||
VolumeObject volumeObject = new VolumeObject();
|
||||
volumeObject.configure(null, new VolumeVO() {});
|
||||
VolumeObject volumeObject = Mockito.mock(VolumeObject.class);
|
||||
Mockito.doReturn(new VolumeVO() {}).when(volumeObject).getVolume();
|
||||
|
||||
volumeServiceImplSpy.copyPoliciesBetweenVolumesAndDestroySourceVolumeAfterMigration(ObjectInDataStoreStateMachine.Event.DestroyRequested, null, volumeObject,
|
||||
volumeObject, true);
|
||||
|
|
@ -205,8 +206,8 @@ public class VolumeServiceTest extends TestCase{
|
|||
|
||||
@Test
|
||||
public void validateDestroySourceVolumeAfterMigrationReturnTrue() throws ExecutionException, InterruptedException{
|
||||
VolumeObject volumeObject = new VolumeObject();
|
||||
volumeObject.configure(null, new VolumeVO() {});
|
||||
VolumeObject volumeObject = Mockito.mock(VolumeObject.class);
|
||||
Mockito.doReturn(new VolumeVO() {}).when(volumeObject).getVolume();
|
||||
|
||||
Mockito.doReturn(true).when(volumeDaoMock).updateUuid(Mockito.anyLong(), Mockito.anyLong());
|
||||
Mockito.doNothing().when(volumeServiceImplSpy).destroyVolume(Mockito.anyLong());
|
||||
|
|
@ -221,10 +222,11 @@ public class VolumeServiceTest extends TestCase{
|
|||
@Test
|
||||
public void validateDestroySourceVolumeAfterMigrationExpungeSourceVolumeAfterMigrationThrowExceptionReturnFalse() throws
|
||||
ExecutionException, InterruptedException{
|
||||
VolumeObject volumeObject = new VolumeObject();
|
||||
VolumeVO vo = new VolumeVO() {};
|
||||
vo.setPoolType(Storage.StoragePoolType.Filesystem);
|
||||
volumeObject.configure(null, vo);
|
||||
|
||||
VolumeObject volumeObject = Mockito.mock(VolumeObject.class);
|
||||
Mockito.doReturn(vo).when(volumeObject).getVolume();
|
||||
vo.setPoolId(1L);
|
||||
|
||||
List<Exception> exceptions = new ArrayList<>(Arrays.asList(new InterruptedException(), new ExecutionException() {}));
|
||||
|
|
|
|||
|
|
@ -83,6 +83,10 @@
|
|||
<version>${project.version}</version>
|
||||
<scope>compile</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.json</groupId>
|
||||
<artifactId>json</artifactId>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
<build>
|
||||
<plugins>
|
||||
|
|
|
|||
|
|
@ -51,7 +51,18 @@ import java.util.stream.Stream;
|
|||
|
||||
import javax.naming.ConfigurationException;
|
||||
import javax.xml.parsers.DocumentBuilder;
|
||||
import javax.xml.parsers.DocumentBuilderFactory;
|
||||
import javax.xml.parsers.ParserConfigurationException;
|
||||
import javax.xml.transform.OutputKeys;
|
||||
import javax.xml.transform.Transformer;
|
||||
import javax.xml.transform.TransformerException;
|
||||
import javax.xml.transform.TransformerFactory;
|
||||
import javax.xml.transform.dom.DOMSource;
|
||||
import javax.xml.transform.stream.StreamResult;
|
||||
import javax.xml.xpath.XPath;
|
||||
import javax.xml.xpath.XPathConstants;
|
||||
import javax.xml.xpath.XPathExpressionException;
|
||||
import javax.xml.xpath.XPathFactory;
|
||||
|
||||
import org.apache.cloudstack.api.ApiConstants.IoDriverPolicy;
|
||||
import org.apache.cloudstack.command.CommandInfo;
|
||||
|
|
@ -78,6 +89,7 @@ import org.apache.cloudstack.utils.qemu.QemuObject;
|
|||
import org.apache.cloudstack.utils.security.KeyStoreUtils;
|
||||
import org.apache.cloudstack.utils.security.ParserUtils;
|
||||
import org.apache.commons.collections.MapUtils;
|
||||
import org.apache.commons.collections4.CollectionUtils;
|
||||
import org.apache.commons.io.FileUtils;
|
||||
import org.apache.commons.lang.ArrayUtils;
|
||||
import org.apache.commons.lang.BooleanUtils;
|
||||
|
|
@ -262,8 +274,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
|
|||
/**
|
||||
* Machine type.
|
||||
*/
|
||||
private static final String PC = isHostS390x() ? S390X_VIRTIO_DEVICE : "pc";
|
||||
private static final String VIRT = isHostS390x() ? S390X_VIRTIO_DEVICE : "virt";
|
||||
public static final String PC = isHostS390x() ? S390X_VIRTIO_DEVICE : "pc";
|
||||
public static final String VIRT = isHostS390x() ? S390X_VIRTIO_DEVICE : "virt";
|
||||
|
||||
/**
|
||||
* Possible devices to add to VM.
|
||||
|
|
@ -352,6 +364,10 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
|
|||
|
||||
public static final String COMMANDS_LOG_PATH = "/usr/share/cloudstack-agent/tmp/commands";
|
||||
|
||||
public static final String CHECKPOINT_CREATE_COMMAND = "virsh checkpoint-create --domain %s --xmlfile %s --redefine";
|
||||
|
||||
public static final String CHECKPOINT_DELETE_COMMAND = "virsh checkpoint-delete --domain %s --checkpointname %s --metadata";
|
||||
|
||||
private String modifyVlanPath;
|
||||
private String versionStringPath;
|
||||
private String patchScriptPath;
|
||||
|
|
@ -414,6 +430,10 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
|
|||
private final static long HYPERVISOR_QEMU_VERSION_SUPPORTS_IO_URING = 5000000;
|
||||
private final static long HYPERVISOR_QEMU_VERSION_IDE_DISCARD_FIXED = 7000000;
|
||||
|
||||
private static final int MINIMUM_LIBVIRT_VERSION_FOR_INCREMENTAL_SNAPSHOT = 7006000;
|
||||
|
||||
private static final int MINIMUM_QEMU_VERSION_FOR_INCREMENTAL_SNAPSHOT = 6001000;
|
||||
|
||||
protected HypervisorType hypervisorType;
|
||||
protected String hypervisorURI;
|
||||
protected long hypervisorLibvirtVersion;
|
||||
|
|
@ -539,11 +559,11 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
|
|||
|
||||
public static final String CGROUP_V2 = "cgroup2fs";
|
||||
|
||||
protected long getHypervisorLibvirtVersion() {
|
||||
public long getHypervisorLibvirtVersion() {
|
||||
return hypervisorLibvirtVersion;
|
||||
}
|
||||
|
||||
protected long getHypervisorQemuVersion() {
|
||||
public long getHypervisorQemuVersion() {
|
||||
return hypervisorQemuVersion;
|
||||
}
|
||||
|
||||
|
|
@ -1930,6 +1950,10 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
|
|||
}
|
||||
|
||||
public String startVM(final Connect conn, final String vmName, final String domainXML) throws LibvirtException, InternalErrorException {
|
||||
return startVM(conn, vmName, domainXML, 0);
|
||||
}
|
||||
|
||||
public String startVM(final Connect conn, final String vmName, final String domainXML, int flags) throws LibvirtException, InternalErrorException {
|
||||
try {
|
||||
/*
|
||||
We create a transient domain here. When this method gets
|
||||
|
|
@ -1957,7 +1981,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
|
|||
}
|
||||
}
|
||||
|
||||
conn.domainCreateXML(domainXML, 0);
|
||||
conn.domainCreateXML(domainXML, flags);
|
||||
} catch (final LibvirtException e) {
|
||||
throw e;
|
||||
}
|
||||
|
|
@ -3006,7 +3030,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
|
|||
return uefiProperties.getProperty(propertie) != null;
|
||||
}
|
||||
|
||||
private boolean isGuestAarch64() {
|
||||
public boolean isGuestAarch64() {
|
||||
return AARCH64.equals(guestCpuArch);
|
||||
}
|
||||
|
||||
|
|
@ -4845,6 +4869,112 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
|
|||
return freeMemory;
|
||||
}
|
||||
|
||||
public void removeCheckpointsOnVm(String vmName, String volumeUuid, List<String> checkpointPaths) {
|
||||
logger.debug("Removing checkpoints with paths [{}] of volume [{}] on VM [{}].", checkpointPaths, volumeUuid, vmName);
|
||||
String checkpointName;
|
||||
for (String checkpointPath : checkpointPaths) {
|
||||
checkpointName = checkpointPath.substring(checkpointPath.lastIndexOf("/") + 1);
|
||||
Script.runSimpleBashScript(String.format(CHECKPOINT_DELETE_COMMAND, vmName, checkpointName));
|
||||
}
|
||||
logger.debug("Removed all checkpoints of volume [{}] on VM [{}].", volumeUuid, vmName);
|
||||
}
|
||||
|
||||
public boolean recreateCheckpointsOnVm(List<VolumeObjectTO> volumes, String vmName, Connect conn) {
|
||||
logger.debug("Trying to recreate checkpoints on VM [{}] with volumes [{}].", vmName, volumes);
|
||||
try {
|
||||
validateLibvirtAndQemuVersionForIncrementalSnapshots();
|
||||
} catch (CloudRuntimeException e) {
|
||||
logger.warn("Will not recreate the checkpoints on VM as {}", e.getMessage(), e);
|
||||
return false;
|
||||
}
|
||||
List<DiskDef> diskDefs = getDisks(conn, vmName);
|
||||
Map<VolumeObjectTO, DiskDef> mapDiskToDiskDef = mapVolumeToDiskDef(volumes, diskDefs);
|
||||
|
||||
for (VolumeObjectTO volume : volumes) {
|
||||
if (CollectionUtils.isEmpty(volume.getCheckpointPaths())) {
|
||||
continue;
|
||||
}
|
||||
Set<KVMStoragePool> storagePoolSet = connectToAllVolumeSnapshotSecondaryStorages(volume);
|
||||
recreateCheckpointsOfDisk(vmName, volume, mapDiskToDiskDef);
|
||||
disconnectAllVolumeSnapshotSecondaryStorages(storagePoolSet);
|
||||
}
|
||||
logger.debug("Successfully recreated all checkpoints on VM [{}].", vmName);
|
||||
return true;
|
||||
}
|
||||
|
||||
public Set<KVMStoragePool> connectToAllVolumeSnapshotSecondaryStorages(VolumeObjectTO volumeObjectTO) {
|
||||
return volumeObjectTO.getCheckpointImageStoreUrls().stream().map(uri -> getStoragePoolMgr().getStoragePoolByURI(uri)).collect(Collectors.toSet());
|
||||
}
|
||||
|
||||
public void disconnectAllVolumeSnapshotSecondaryStorages(Set<KVMStoragePool> kvmStoragePools) {
|
||||
kvmStoragePools.forEach(storage -> getStoragePoolMgr().deleteStoragePool(storage.getType(), storage.getUuid()));
|
||||
}
|
||||
|
||||
|
||||
protected void recreateCheckpointsOfDisk(String vmName, VolumeObjectTO volume, Map<VolumeObjectTO, DiskDef> mapDiskToDiskDef) {
|
||||
for (String path : volume.getCheckpointPaths()) {
|
||||
DiskDef diskDef = mapDiskToDiskDef.get(volume);
|
||||
if (diskDef != null) {
|
||||
try {
|
||||
updateDiskLabelOnXml(path, diskDef.getDiskLabel());
|
||||
} catch (ParserConfigurationException | IOException | SAXException | TransformerException | XPathExpressionException e) {
|
||||
logger.error("Exception while parsing checkpoint XML with path [{}].", path, e);
|
||||
throw new CloudRuntimeException(e);
|
||||
}
|
||||
} else {
|
||||
logger.debug("Could not map [{}] to any disk definition. Will try to recreate snapshot without updating disk label.", volume);
|
||||
}
|
||||
|
||||
logger.trace("Recreating checkpoint with path [{}] on VM [{}].", path, vmName);
|
||||
Script.runSimpleBashScript(String.format(CHECKPOINT_CREATE_COMMAND, vmName, path));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Changes the value of the disk label of the checkpoint XML found in {@code path} to {@code label}. This method assumes that the checkpoint only contains one disk.
|
||||
* @param path the path to the checkpoint XML to be updated
|
||||
* @param label the new label to be used for the disk
|
||||
* */
|
||||
private void updateDiskLabelOnXml(String path, String label) throws ParserConfigurationException, IOException, SAXException, XPathExpressionException, TransformerException {
|
||||
logger.trace("Updating checkpoint with path [{}] to use disk label [{}].", path, label);
|
||||
|
||||
DocumentBuilderFactory docFactory = ParserUtils.getSaferDocumentBuilderFactory();
|
||||
DocumentBuilder docBuilder = docFactory.newDocumentBuilder();
|
||||
Document doc = docBuilder.parse(new File(path));
|
||||
|
||||
XPath xPath = XPathFactory.newInstance().newXPath();
|
||||
Node diskNode = (Node) xPath.compile("/domaincheckpoint/disks/disk").evaluate(doc, XPathConstants.NODE);
|
||||
diskNode.getAttributes().getNamedItem("name").setNodeValue(label);
|
||||
|
||||
Transformer tf = TransformerFactory.newInstance().newTransformer();
|
||||
tf.setOutputProperty(OutputKeys.INDENT, "yes");
|
||||
tf.setOutputProperty(OutputKeys.METHOD, "xml");
|
||||
tf.setOutputProperty("{http://xml.apache.org/xslt}indent-amount", "4");
|
||||
|
||||
DOMSource domSource = new DOMSource(doc);
|
||||
StreamResult sr = new StreamResult(new File(path));
|
||||
tf.transform(domSource, sr);
|
||||
}
|
||||
|
||||
protected Map<VolumeObjectTO, DiskDef> mapVolumeToDiskDef(List<VolumeObjectTO> volumeTos, List<DiskDef> diskDefs) {
|
||||
HashMap<VolumeObjectTO, DiskDef> diskToDiskDefHashMap = new HashMap<>();
|
||||
for (VolumeObjectTO volumeTo : volumeTos) {
|
||||
for (DiskDef diskDef : diskDefs) {
|
||||
if (StringUtils.contains(diskDef.getDiskPath(), volumeTo.getPath())) {
|
||||
diskToDiskDefHashMap.put(volumeTo, diskDef);
|
||||
}
|
||||
}
|
||||
}
|
||||
return diskToDiskDefHashMap;
|
||||
}
|
||||
|
||||
public void validateLibvirtAndQemuVersionForIncrementalSnapshots() {
|
||||
if (getHypervisorLibvirtVersion() < MINIMUM_LIBVIRT_VERSION_FOR_INCREMENTAL_SNAPSHOT || getHypervisorQemuVersion() < MINIMUM_QEMU_VERSION_FOR_INCREMENTAL_SNAPSHOT) {
|
||||
throw new CloudRuntimeException(String.format("Hypervisor version is insufficient, should have at least libvirt [%s] and qemu [%s] but we have [%s] and [%s].",
|
||||
MINIMUM_LIBVIRT_VERSION_FOR_INCREMENTAL_SNAPSHOT, MINIMUM_QEMU_VERSION_FOR_INCREMENTAL_SNAPSHOT, getHypervisorLibvirtVersion(), getHypervisorQemuVersion()));
|
||||
}
|
||||
}
|
||||
|
||||
private boolean canBridgeFirewall(final String prvNic) {
|
||||
final Script cmd = new Script(securityGroupPath, timeout, LOGGER);
|
||||
cmd.add("can_bridge_firewall");
|
||||
|
|
@ -5810,4 +5940,12 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
public String getHypervisorPath() {
|
||||
return hypervisorPath;
|
||||
}
|
||||
public String getGuestCpuArch() {
|
||||
return guestCpuArch;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
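The checkpoint helpers above shell out to virsh with the two command templates and gate the whole feature on numeric libvirt/QEMU versions; reading the constants with libvirt's usual major*1,000,000 + minor*1,000 + release encoding (my interpretation, not stated in the patch), 7006000 and 6001000 correspond to libvirt 7.6.0 and QEMU 6.1.0. A small sketch of how the command strings and the version gate fit together; the VM name and checkpoint path are made up.

    public class CheckpointCommandSketch {
        // Command templates taken verbatim from LibvirtComputingResource above.
        static final String CHECKPOINT_CREATE = "virsh checkpoint-create --domain %s --xmlfile %s --redefine";
        static final String CHECKPOINT_DELETE = "virsh checkpoint-delete --domain %s --checkpointname %s --metadata";

        static final long MIN_LIBVIRT = 7006000; // read as 7.6.0 under libvirt's numeric encoding (assumed)
        static final long MIN_QEMU = 6001000;    // read as 6.1.0 (assumed)

        static boolean supportsIncrementalSnapshots(long libvirtVersion, long qemuVersion) {
            return libvirtVersion >= MIN_LIBVIRT && qemuVersion >= MIN_QEMU;
        }

        public static void main(String[] args) {
            String vmName = "i-2-10-VM";                                        // example name
            String checkpointPath = "/mnt/sec/snapshots/2/10/checkpoint-1.xml"; // example path
            // removeCheckpointsOnVm() derives the checkpoint name from the last path segment.
            String checkpointName = checkpointPath.substring(checkpointPath.lastIndexOf('/') + 1);

            System.out.println(String.format(CHECKPOINT_CREATE, vmName, checkpointPath));
            System.out.println(String.format(CHECKPOINT_DELETE, vmName, checkpointName));
            System.out.println(supportsIncrementalSnapshots(8000000, 6002000)); // true
        }
    }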
|
|||
|
|
@ -16,16 +16,23 @@
|
|||
// under the License.
|
||||
package com.cloud.hypervisor.kvm.resource;
|
||||
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.StringReader;
|
||||
|
||||
import javax.xml.parsers.ParserConfigurationException;
|
||||
import javax.xml.parsers.SAXParser;
|
||||
import javax.xml.parsers.SAXParserFactory;
|
||||
import javax.xml.transform.Transformer;
|
||||
import javax.xml.transform.TransformerException;
|
||||
import javax.xml.transform.TransformerFactory;
|
||||
import javax.xml.transform.dom.DOMSource;
|
||||
import javax.xml.transform.stream.StreamResult;
|
||||
|
||||
import org.apache.cloudstack.utils.security.ParserUtils;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.w3c.dom.Document;
|
||||
import org.xml.sax.InputSource;
|
||||
import org.xml.sax.SAXException;
|
||||
import org.xml.sax.helpers.DefaultHandler;
|
||||
|
|
@ -65,6 +72,20 @@ public class LibvirtXMLParser extends DefaultHandler {
|
|||
return false;
|
||||
}
|
||||
|
||||
public static String getXml(Document doc) throws TransformerException {
|
||||
TransformerFactory transformerFactory = ParserUtils.getSaferTransformerFactory();
|
||||
Transformer transformer = transformerFactory.newTransformer();
|
||||
|
||||
DOMSource source = new DOMSource(doc);
|
||||
|
||||
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
|
||||
StreamResult result = new StreamResult(byteArrayOutputStream);
|
||||
|
||||
transformer.transform(source, result);
|
||||
|
||||
return byteArrayOutputStream.toString();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void characters(char[] ch, int start, int length) throws SAXException {
|
||||
}
|
||||
|
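getXml() centralizes Document-to-String serialization so LibvirtMigrateCommandWrapper (further down) can drop its private copy. A minimal round-trip sketch using plain JDK factories in place of CloudStack's hardened ParserUtils ones; the sample XML is illustrative.

    import java.io.ByteArrayOutputStream;
    import java.io.StringReader;

    import javax.xml.parsers.DocumentBuilderFactory;
    import javax.xml.transform.Transformer;
    import javax.xml.transform.TransformerException;
    import javax.xml.transform.TransformerFactory;
    import javax.xml.transform.dom.DOMSource;
    import javax.xml.transform.stream.StreamResult;

    import org.w3c.dom.Document;
    import org.xml.sax.InputSource;

    public class XmlRoundTripSketch {
        // Same shape as LibvirtXMLParser.getXml(), with the stock TransformerFactory.
        static String getXml(Document doc) throws TransformerException {
            Transformer transformer = TransformerFactory.newInstance().newTransformer();
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            transformer.transform(new DOMSource(doc), new StreamResult(out));
            return out.toString();
        }

        public static void main(String[] args) throws Exception {
            // Parse a tiny domain XML, tweak it, and serialize it back to a string.
            Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder()
                    .parse(new InputSource(new StringReader("<domain><name>i-2-10-VM</name></domain>")));
            doc.getElementsByTagName("name").item(0).setTextContent("i-2-10-VM-migrated");
            System.out.println(getXml(doc));
        }
    }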
|||
|
|
@ -0,0 +1,106 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package com.cloud.hypervisor.kvm.resource.wrapper;
|
||||
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.ConvertSnapshotAnswer;
|
||||
import com.cloud.agent.api.ConvertSnapshotCommand;
|
||||
import com.cloud.agent.api.to.DataStoreTO;
|
||||
|
||||
import com.cloud.agent.api.to.NfsTO;
|
||||
import com.cloud.agent.properties.AgentProperties;
|
||||
import com.cloud.agent.properties.AgentPropertiesFileHandler;
|
||||
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
|
||||
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
|
||||
import com.cloud.resource.CommandWrapper;
|
||||
|
||||
import com.cloud.resource.ResourceWrapper;
|
||||
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
|
||||
import org.apache.cloudstack.utils.qemu.QemuImg;
|
||||
import org.apache.cloudstack.utils.qemu.QemuImgException;
|
||||
import org.apache.cloudstack.utils.qemu.QemuImgFile;
|
||||
import org.libvirt.LibvirtException;
|
||||
|
||||
import java.io.File;
|
||||
import java.util.Set;
|
||||
|
||||
@ResourceWrapper(handles = ConvertSnapshotCommand.class)
|
||||
public class LibvirtConvertSnapshotCommandWrapper extends CommandWrapper<ConvertSnapshotCommand, Answer, LibvirtComputingResource> {
|
||||
|
||||
@Override
|
||||
public Answer execute(ConvertSnapshotCommand command, LibvirtComputingResource serverResource) {
|
||||
SnapshotObjectTO snapshotObjectTO = command.getSnapshotObjectTO();
|
||||
DataStoreTO imageStore = snapshotObjectTO.getDataStore();
|
||||
|
||||
logger.debug(String.format("Converting snapshot [%s] in image store [%s].", snapshotObjectTO.getId(), imageStore.getUuid()));
|
||||
|
||||
if (!(imageStore instanceof NfsTO)) {
|
||||
return new Answer(command, false, "Image Store must be NFS.");
|
||||
}
|
||||
NfsTO nfsImageStore = (NfsTO)imageStore;
|
||||
|
||||
String secondaryStoragePoolUrl = nfsImageStore.getUrl();
|
||||
|
||||
Set<KVMStoragePool> storagePoolSet = null;
|
||||
KVMStoragePool secondaryStorage = null;
|
||||
try {
|
||||
secondaryStorage = serverResource.getStoragePoolMgr().getStoragePoolByURI(secondaryStoragePoolUrl);
|
||||
storagePoolSet = serverResource.connectToAllVolumeSnapshotSecondaryStorages(snapshotObjectTO.getVolume());
|
||||
|
||||
String snapshotRelativePath = snapshotObjectTO.getPath();
|
||||
String snapshotPath = secondaryStorage.getLocalPathFor(snapshotRelativePath);
|
||||
|
||||
String tempSnapshotPath = snapshotPath + ConvertSnapshotCommand.TEMP_SNAPSHOT_NAME;
|
||||
|
||||
logger.debug(String.format("Converting snapshot [%s] to [%s]. The original snapshot is at [%s].", snapshotObjectTO.getId(), tempSnapshotPath, snapshotPath));
|
||||
|
||||
QemuImg qemuImg = new QemuImg(AgentPropertiesFileHandler.getPropertyValue(AgentProperties.INCREMENTAL_SNAPSHOT_TIMEOUT) * 1000);
|
||||
|
||||
QemuImgFile snapshot = new QemuImgFile(snapshotPath, QemuImg.PhysicalDiskFormat.QCOW2);
|
||||
QemuImgFile tempSnapshot = new QemuImgFile(tempSnapshotPath, QemuImg.PhysicalDiskFormat.QCOW2);
|
||||
|
||||
qemuImg.convert(snapshot, tempSnapshot);
|
||||
|
||||
SnapshotObjectTO convertedSnapshot = new SnapshotObjectTO();
|
||||
convertedSnapshot.setPath(snapshotRelativePath + ConvertSnapshotCommand.TEMP_SNAPSHOT_NAME);
|
||||
|
||||
final File snapFile = new File(tempSnapshotPath);
|
||||
|
||||
if (!snapFile.exists()) {
|
||||
return new Answer(command, false, "Failed to convert snapshot.");
|
||||
}
|
||||
|
||||
convertedSnapshot.setPhysicalSize(snapFile.length());
|
||||
logger.debug(String.format("Successfully converted snapshot [%s] to [%s].", snapshotObjectTO.getId(), tempSnapshotPath));
|
||||
|
||||
return new ConvertSnapshotAnswer(convertedSnapshot);
|
||||
} catch (LibvirtException | QemuImgException ex) {
|
||||
logger.error(String.format("Failed to convert snapshot [%s] due to %s.", snapshotObjectTO, ex.getMessage()), ex);
|
||||
return new Answer(command, ex);
|
||||
} finally {
|
||||
if (secondaryStorage != null) {
|
||||
serverResource.getStoragePoolMgr().deleteStoragePool(secondaryStorage.getType(), secondaryStorage.getUuid());
|
||||
}
|
||||
if (storagePoolSet != null) {
|
||||
serverResource.disconnectAllVolumeSnapshotSecondaryStorages(storagePoolSet);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
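LibvirtConvertSnapshotCommandWrapper copies the snapshot into a temporary standalone QCOW2 through CloudStack's QemuImg wrapper, bounded by the incremental.snapshot.timeout agent property (seconds, converted to milliseconds). A hedged equivalent using ProcessBuilder and the plain qemu-img CLI, since the wrapper itself is not reproduced here; the paths are examples only.

    import java.io.IOException;
    import java.util.concurrent.TimeUnit;

    public class QemuConvertSketch {
        // Converts 'source' into a standalone QCOW2 at 'target', failing if qemu-img
        // does not finish within 'timeoutSeconds' (mirroring the seconds-to-millis handling).
        static void convertSnapshot(String source, String target, long timeoutSeconds)
                throws IOException, InterruptedException {
            Process p = new ProcessBuilder("qemu-img", "convert", "-O", "qcow2", source, target)
                    .inheritIO()
                    .start();
            if (!p.waitFor(timeoutSeconds * 1000, TimeUnit.MILLISECONDS)) {
                p.destroyForcibly();
                throw new IOException("qemu-img convert timed out after " + timeoutSeconds + " seconds");
            }
            if (p.exitValue() != 0) {
                throw new IOException("qemu-img convert failed with exit code " + p.exitValue());
            }
        }

        public static void main(String[] args) throws Exception {
            // Example paths only; the real ones come from the NFS image store mount.
            convertSnapshot("/mnt/sec/snapshots/2/10/snap-1", "/mnt/sec/snapshots/2/10/snap-1.tmp", 10800);
        }
    }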
|
@ -66,10 +66,13 @@ public final class LibvirtGetVmIpAddressCommandWrapper extends CommandWrapper<Ge
|
|||
|
||||
String sanitizedVmName = sanitizeBashCommandArgument(vmName);
|
||||
String networkCidr = command.getVmNetworkCidr();
|
||||
String macAddress = command.getMacAddress();
|
||||
|
||||
ip = ipFromDomIf(sanitizedVmName, networkCidr);
|
||||
init();
|
||||
|
||||
if (ip == null) {
|
||||
ip = ipFromDomIf(sanitizedVmName, networkCidr, macAddress);
|
||||
|
||||
if (ip == null && networkCidr != null) {
|
||||
if(!command.isWindows()) {
|
||||
ip = ipFromDhcpLeaseFile(sanitizedVmName, networkCidr);
|
||||
} else {
|
||||
|
|
@ -87,25 +90,17 @@ public final class LibvirtGetVmIpAddressCommandWrapper extends CommandWrapper<Ge
|
|||
return new Answer(command, result, ip);
|
||||
}
|
||||
|
||||
private String ipFromDomIf(String sanitizedVmName, String networkCidr) {
|
||||
private String ipFromDomIf(String sanitizedVmName, String networkCidr, String macAddress) {
|
||||
String ip = null;
|
||||
List<String[]> commands = new ArrayList<>();
|
||||
commands.add(new String[]{virsh_path, "domifaddr", sanitizedVmName, "--source", "agent"});
|
||||
Pair<Integer,String> response = executePipedCommands(commands, 0);
|
||||
if (response != null) {
|
||||
String output = response.second();
|
||||
String[] lines = output.split("\n");
|
||||
for (String line : lines) {
|
||||
if (line.contains("ipv4")) {
|
||||
String[] parts = line.split(" ");
|
||||
String[] ipParts = parts[parts.length-1].split("/");
|
||||
if (ipParts.length > 1) {
|
||||
if (NetUtils.isIpWithInCidrRange(ipParts[0], networkCidr)) {
|
||||
ip = ipParts[0];
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
Pair<String, String> ipAddresses = getIpAddresses(output, macAddress);
|
||||
String ipv4 = ipAddresses.first();
|
||||
if (networkCidr == null || NetUtils.isIpWithInCidrRange(ipv4, networkCidr)) {
|
||||
ip = ipv4;
|
||||
}
|
||||
} else {
|
||||
logger.error("ipFromDomIf: Command execution failed for VM: " + sanitizedVmName);
|
||||
|
|
@ -113,6 +108,38 @@ public final class LibvirtGetVmIpAddressCommandWrapper extends CommandWrapper<Ge
|
|||
return ip;
|
||||
}
|
||||
|
||||
private Pair<String, String> getIpAddresses(String output, String macAddress) {
|
||||
String ipv4 = null;
|
||||
String ipv6 = null;
|
||||
boolean found = false;
|
||||
String[] lines = output.split("\n");
|
||||
for (String line : lines) {
|
||||
String[] parts = line.replaceAll(" +", " ").trim().split(" ");
|
||||
if (parts.length < 4) {
|
||||
continue;
|
||||
}
|
||||
String device = parts[0];
|
||||
String mac = parts[1];
|
||||
if (found) {
|
||||
if (!device.equals("-") || !mac.equals("-")) {
|
||||
break;
|
||||
}
|
||||
} else if (!mac.equals(macAddress)) {
|
||||
continue;
|
||||
}
|
||||
found = true;
|
||||
String ipFamily = parts[2];
|
||||
String ipPart = parts[3].split("/")[0];
|
||||
if (ipFamily.equals("ipv4")) {
|
||||
ipv4 = ipPart;
|
||||
} else if (ipFamily.equals("ipv6")) {
|
||||
ipv6 = ipPart;
|
||||
}
|
||||
}
|
||||
logger.debug(String.format("Found ipv4: %s and ipv6: %s with mac address %s", ipv4, ipv6, macAddress));
|
||||
return new Pair<>(ipv4, ipv6);
|
||||
}
|
||||
|
||||
private String ipFromDhcpLeaseFile(String sanitizedVmName, String networkCidr) {
|
||||
String ip = null;
|
||||
List<String[]> commands = new ArrayList<>();
|
||||
|
|
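getIpAddresses() walks the whitespace-normalized "virsh domifaddr --source agent" table, locks onto the row whose MAC matches the NIC, and keeps collecting addresses from the continuation rows (shown as "-") until the next interface begins. A compact, runnable version of that parsing against a sample table; the sample output format is an assumption about virsh's layout, included only to make the walk-through concrete.

    public class DomIfAddrParseSketch {
        // Returns {ipv4, ipv6} for the interface with the given MAC, or nulls if absent.
        static String[] parse(String output, String macAddress) {
            String ipv4 = null, ipv6 = null;
            boolean found = false;
            for (String line : output.split("\n")) {
                String[] parts = line.replaceAll(" +", " ").trim().split(" ");
                if (parts.length < 4) continue;
                String device = parts[0], mac = parts[1];
                if (found) {
                    if (!device.equals("-") || !mac.equals("-")) break;   // next interface started
                } else if (!mac.equals(macAddress)) {
                    continue;
                }
                found = true;
                String family = parts[2], ip = parts[3].split("/")[0];
                if (family.equals("ipv4")) ipv4 = ip; else if (family.equals("ipv6")) ipv6 = ip;
            }
            return new String[] {ipv4, ipv6};
        }

        public static void main(String[] args) {
            String sample =
                    " Name       MAC address          Protocol     Address\n" +
                    "-------------------------------------------------------\n" +
                    " eth0       52:54:00:aa:bb:cc    ipv4         192.168.1.10/24\n" +
                    " -          -                    ipv6         fe80::1/64\n";
            String[] ips = parse(sample, "52:54:00:aa:bb:cc");
            System.out.println(ips[0] + " / " + ips[1]);  // 192.168.1.10 / fe80::1
        }
    }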
|
|||
|
|
@ -19,7 +19,6 @@
|
|||
|
||||
package com.cloud.hypervisor.kvm.resource.wrapper;
|
||||
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.net.URISyntaxException;
|
||||
|
|
@ -39,12 +38,9 @@ import java.util.concurrent.TimeoutException;
|
|||
import javax.xml.parsers.DocumentBuilder;
|
||||
import javax.xml.parsers.DocumentBuilderFactory;
|
||||
import javax.xml.parsers.ParserConfigurationException;
|
||||
import javax.xml.transform.Transformer;
|
||||
import javax.xml.transform.TransformerException;
|
||||
import javax.xml.transform.TransformerFactory;
|
||||
import javax.xml.transform.dom.DOMSource;
|
||||
import javax.xml.transform.stream.StreamResult;
|
||||
|
||||
import com.cloud.hypervisor.kvm.resource.LibvirtXMLParser;
|
||||
import org.apache.cloudstack.utils.security.ParserUtils;
|
||||
import org.apache.commons.collections.MapUtils;
|
||||
import org.apache.commons.io.FilenameUtils;
|
||||
|
|
@ -457,7 +453,7 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
|
|||
logger.info(String.format("VM [%s] will have CPU shares altered from [%s] to [%s] as part of migration because the cgroups version differs between hosts.",
|
||||
migrateCommand.getVmName(), currentShares, newVmCpuShares));
|
||||
sharesNode.setTextContent(String.valueOf(newVmCpuShares));
|
||||
return getXml(document);
|
||||
return LibvirtXMLParser.getXml(document);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -527,7 +523,7 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
|
|||
}
|
||||
}
|
||||
|
||||
return getXml(doc);
|
||||
return LibvirtXMLParser.getXml(doc);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -702,7 +698,7 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
|
|||
}
|
||||
}
|
||||
|
||||
return getXml(doc);
|
||||
return LibvirtXMLParser.getXml(doc);
|
||||
}
|
||||
|
||||
private String getOldVolumePath(List<DiskDef> disks, String vmName) {
|
||||
|
|
@ -795,7 +791,7 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
|
|||
newChildSourceNode.setAttribute("file", newIsoVolumePath);
|
||||
diskNode.appendChild(newChildSourceNode);
|
||||
logger.debug(String.format("Replaced ISO path [%s] with [%s] in VM [%s] XML configuration.", oldIsoVolumePath, newIsoVolumePath, vmName));
|
||||
return getXml(doc);
|
||||
return LibvirtXMLParser.getXml(doc);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -804,7 +800,7 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
|
|||
}
|
||||
}
|
||||
|
||||
return getXml(doc);
|
||||
return LibvirtXMLParser.getXml(doc);
|
||||
}
|
||||
|
||||
private String getPathFromSourceText(Set<String> paths, String sourceText) {
|
||||
|
|
@ -859,20 +855,6 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
|
|||
return null;
|
||||
}
|
||||
|
||||
private String getXml(Document doc) throws TransformerException {
|
||||
TransformerFactory transformerFactory = ParserUtils.getSaferTransformerFactory();
|
||||
Transformer transformer = transformerFactory.newTransformer();
|
||||
|
||||
DOMSource source = new DOMSource(doc);
|
||||
|
||||
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
|
||||
StreamResult result = new StreamResult(byteArrayOutputStream);
|
||||
|
||||
transformer.transform(source, result);
|
||||
|
||||
return byteArrayOutputStream.toString();
|
||||
}
|
||||
|
||||
private String replaceDiskSourceFile(String xmlDesc, String isoPath, String vmName) throws IOException, SAXException, ParserConfigurationException, TransformerException {
|
||||
InputStream in = IOUtils.toInputStream(xmlDesc);
|
||||
|
||||
|
|
@ -895,7 +877,7 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
|
|||
}
|
||||
}
|
||||
}
|
||||
return getXml(doc);
|
||||
return LibvirtXMLParser.getXml(doc);
|
||||
}
|
||||
|
||||
private boolean findDiskNode(Document doc, NodeList devicesChildNodes, String vmName, String isoPath) {
|
||||
|
|
|
|||
|
|
@ -38,6 +38,17 @@ public final class LibvirtModifyStoragePoolCommandWrapper extends CommandWrapper
|
|||
@Override
|
||||
public Answer execute(final ModifyStoragePoolCommand command, final LibvirtComputingResource libvirtComputingResource) {
|
||||
final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr();
|
||||
if (!command.getAdd()) {
|
||||
boolean status = storagePoolMgr.deleteStoragePool(command.getPool().getType(), command.getPool().getUuid(), command.getDetails());
|
||||
if (status) {
|
||||
final ModifyStoragePoolAnswer answer = new ModifyStoragePoolAnswer(command, true, null);
|
||||
return answer;
|
||||
}
|
||||
|
||||
final ModifyStoragePoolAnswer answer = new ModifyStoragePoolAnswer(command, false, "Failed to delete storage pool");
|
||||
return answer;
|
||||
}
|
||||
|
||||
final KVMStoragePool storagepool =
|
||||
storagePoolMgr.createStoragePool(command.getPool().getUuid(), command.getPool().getHost(), command.getPool().getPort(), command.getPool().getPath(), command.getPool()
|
||||
.getUserInfo(), command.getPool().getType(), command.getDetails());
|
||||
|
|
@ -47,7 +58,6 @@ public final class LibvirtModifyStoragePoolCommandWrapper extends CommandWrapper
|
|||
|
||||
final Map<String, TemplateProp> tInfo = new HashMap<String, TemplateProp>();
|
||||
final ModifyStoragePoolAnswer answer = new ModifyStoragePoolAnswer(command, storagepool.getCapacity(), storagepool.getAvailable(), tInfo, storagepool.getDetails());
|
||||
|
||||
return answer;
|
||||
}
|
||||
}
|
||||
|
|
|
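LibvirtModifyStoragePoolCommandWrapper now also services the removal direction of ModifyStoragePoolCommand: when getAdd() is false it deletes the pool through the storage pool manager and answers with the outcome instead of falling through to pool creation. A schematic of that branching with stand-in types; Answer and PoolManager below are illustrative, not the CloudStack classes.

    public class ModifyStoragePoolSketch {
        record Answer(boolean success, String details) {}

        interface PoolManager { boolean deletePool(String uuid); Answer createPool(String uuid); }

        static Answer handle(boolean add, String poolUuid, PoolManager mgr) {
            if (!add) {
                // Deletion path introduced by this change: report the outcome directly.
                return mgr.deletePool(poolUuid)
                        ? new Answer(true, null)
                        : new Answer(false, "Failed to delete storage pool");
            }
            return mgr.createPool(poolUuid); // original creation path
        }

        public static void main(String[] args) {
            PoolManager mgr = new PoolManager() {
                public boolean deletePool(String uuid) { return true; }
                public Answer createPool(String uuid) { return new Answer(true, "created " + uuid); }
            };
            System.out.println(handle(false, "pool-uuid-1", mgr)); // Answer[success=true, details=null]
        }
    }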
|||
|
|
@ -0,0 +1,48 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//

package com.cloud.hypervisor.kvm.resource.wrapper;

import com.cloud.agent.api.Answer;
import com.cloud.agent.api.RecreateCheckpointsCommand;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.libvirt.LibvirtException;

import java.util.List;

@ResourceWrapper(handles = RecreateCheckpointsCommand.class)
public class LibvirtRecreateCheckpointsCommandWrapper extends CommandWrapper<RecreateCheckpointsCommand, Answer, LibvirtComputingResource> {
    @Override
    public Answer execute(RecreateCheckpointsCommand command, LibvirtComputingResource serverResource) {
        String vmName = command.getVmName();
        List<VolumeObjectTO> volumes = command.getDisks();

        boolean result;
        try {
            result = serverResource.recreateCheckpointsOnVm(volumes, vmName, serverResource.getLibvirtUtilitiesHelper().getConnectionByVmName(vmName));
        } catch (LibvirtException e) {
            logger.error(String.format("Failed to recreate checkpoints on VM [%s] due to %s", vmName, e.getMessage()), e);
            return new Answer(command, e);
        }
        return new Answer(command, result, null);
    }
}
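Note: a wrapper such as LibvirtRecreateCheckpointsCommandWrapper is located through its @ResourceWrapper annotation and invoked when the agent receives the matching command. A minimal caller-side sketch, purely illustrative (the command construction and the agentManager/hostId names are assumptions, not part of this change):

    // Hypothetical management-server side round trip; names are placeholders.
    RecreateCheckpointsCommand cmd = new RecreateCheckpointsCommand(volumes, vmName);
    Answer answer = agentManager.easySend(hostId, cmd);   // assumed AgentManager-style send
    if (answer == null || !answer.getResult()) {
        logger.warn("Checkpoint recreation on VM " + vmName + " did not succeed.");
    }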
@ -0,0 +1,130 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.hypervisor.kvm.resource.wrapper;

import com.cloud.agent.api.Answer;
import com.cloud.agent.api.Command;
import com.cloud.agent.api.RemoveBitmapCommand;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImgException;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.json.JSONArray;
import org.libvirt.Domain;
import org.libvirt.LibvirtException;

import org.json.JSONObject;

import java.io.File;


@ResourceWrapper(handles = RemoveBitmapCommand.class)
public class LibvirtRemoveBitmapCommandWrapper extends CommandWrapper<RemoveBitmapCommand, Answer, LibvirtComputingResource> {

    private static final String QEMU_MONITOR_REMOVE_BITMAP_COMMAND = "{\"execute\": \"block-dirty-bitmap-remove\", \"arguments\":{\"node\":\"%s\",\"name\":\"%s\" }}";
    private static final String QEMU_MONITOR_QUERY_BLOCK_COMMAND = "{\"execute\": \"query-block\"}";


    @Override
    public Answer execute(RemoveBitmapCommand command, LibvirtComputingResource resource) {
        SnapshotObjectTO snapshotObjectTO = command.getSnapshotObjectTO();

        try {
            if (command.isVmRunning()) {
                return removeBitmapForRunningVM(snapshotObjectTO, resource, command);
            }
            return removeBitmapForStoppedVM(snapshotObjectTO, resource, command);
        } catch (LibvirtException | QemuImgException exception) {
            logger.error("Exception while removing bitmap for volume [{}]. Caught exception is [{}].", snapshotObjectTO.getVolume().getName(), exception);
            return new Answer(command, exception);
        }
    }

    protected Answer removeBitmapForRunningVM(SnapshotObjectTO snapshotObjectTO, LibvirtComputingResource resource, RemoveBitmapCommand cmd) throws LibvirtException {
        Domain vm = resource.getDomain(resource.getLibvirtUtilitiesHelper().getConnection(), snapshotObjectTO.getVmName());
        String nodeName = getNodeName(vm, snapshotObjectTO);
        logger.debug("Got [{}] as node-name for volume [{}] of VM [{}].", nodeName, snapshotObjectTO.getVolume().getName(), snapshotObjectTO.getVmName());
        if (nodeName == null) {
            return new Answer(cmd, false, "Failed to get node-name to remove the bitmap.");
        }

        String bitmapName = getBitmapName(snapshotObjectTO);
        logger.debug("Removing bitmap [{}].", bitmapName);
        vm.qemuMonitorCommand(String.format(QEMU_MONITOR_REMOVE_BITMAP_COMMAND, nodeName, bitmapName), 0);
        return new Answer(cmd);
    }

    protected Answer removeBitmapForStoppedVM(SnapshotObjectTO snapshotObjectTO, LibvirtComputingResource resource, Command cmd) throws LibvirtException, QemuImgException {
        VolumeObjectTO volumeTo = snapshotObjectTO.getVolume();
        PrimaryDataStoreTO primaryDataStoreTO = (PrimaryDataStoreTO) volumeTo.getDataStore();

        KVMStoragePool primaryPool = resource.getStoragePoolMgr().getStoragePool(primaryDataStoreTO.getPoolType(), primaryDataStoreTO.getUuid());

        QemuImg qemuImg = new QemuImg(cmd.getWait());
        QemuImgFile volume = new QemuImgFile(primaryPool.getLocalPath() + File.separator + volumeTo.getPath(), QemuImg.PhysicalDiskFormat.QCOW2);

        String bitmap = getBitmapName(snapshotObjectTO);

        logger.debug("Removing bitmap [{}] for volume [{}].", bitmap, volumeTo.getName());

        try {
            qemuImg.bitmap(QemuImg.BitmapOperation.Remove, volume, bitmap);
        } catch (QemuImgException ex) {
            if (!(ex.getMessage().contains("Dirty bitmap") || ex.getMessage().contains("not found"))) {
                throw ex;
            }
            logger.warn("Could not delete dirty bitmap [{}] as it was not found. This will happen if the volume was migrated. If it is not the case, this should be reported.", bitmap);
        }
        return new Answer(cmd);
    }


    protected String getBitmapName(SnapshotObjectTO snapshotObjectTO) {
        String[] splitPath = snapshotObjectTO.getPath().split(File.separator);
        return splitPath[splitPath.length - 1];
    }

    protected String getNodeName(Domain vm, SnapshotObjectTO snapshotObjectTO) throws LibvirtException {
        logger.debug("Getting nodeName to remove bitmap for volume [{}] of VM [{}].", snapshotObjectTO.getVolume().getName(), snapshotObjectTO.getVmName());
        String vmBlockInfo = vm.qemuMonitorCommand(QEMU_MONITOR_QUERY_BLOCK_COMMAND, 0);
        logger.debug("Parsing [{}]", vmBlockInfo);
        JSONObject jsonObj = new JSONObject(vmBlockInfo);
        JSONArray returnArray = jsonObj.getJSONArray("return");

        for (int i = 0; i < returnArray.length(); i++) {
            JSONObject blockInfo = returnArray.getJSONObject(i);
            if (!blockInfo.has("inserted")) {
                continue;
            }
            JSONObject inserted = blockInfo.getJSONObject("inserted");
            String volumePath = inserted.getString("file");
            if (!volumePath.contains(snapshotObjectTO.getVolume().getPath())) {
                continue;
            }
            return inserted.getString("node-name");
        }
        return null;
    }
}
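Note: the bitmap name used above is simply the last component of the snapshot path, and removal goes through QMP when the VM is running or through qemu-img when it is stopped. A rough sketch with made-up values, for illustration only:

    // Illustrative only: node name, bitmap name and volume path are placeholders.
    String nodeName = "libvirt-2-format";                  // from query-block "node-name"
    String bitmap = "c1f2d4e5-snapshot-uuid";              // last component of snapshotObjectTO.getPath()
    String qmp = String.format(QEMU_MONITOR_REMOVE_BITMAP_COMMAND, nodeName, bitmap);
    // Running VM:  vm.qemuMonitorCommand(qmp, 0);
    // Stopped VM:  qemuImg.bitmap(QemuImg.BitmapOperation.Remove, volumeFile, bitmap);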
@ -150,16 +150,16 @@ public class LibvirtRestoreBackupCommandWrapper extends CommandWrapper<RestoreBa
        String mountDirectory = String.format("%s.%s",BACKUP_TEMP_FILE_PREFIX , randomChars);
        try {
            mountDirectory = Files.createTempDirectory(mountDirectory).toString();
            String mountOpts = null;
            if (Objects.nonNull(mountOptions)) {
                mountOpts = mountOptions;
                if ("cifs".equals(backupRepoType)) {
                    mountOpts += ",nobrl";
            String mount = String.format(MOUNT_COMMAND, backupRepoType, backupRepoAddress, mountDirectory);
            if ("cifs".equals(backupRepoType)) {
                if (Objects.isNull(mountOptions) || mountOptions.trim().isEmpty()) {
                    mountOptions = "nobrl";
                } else {
                    mountOptions += ",nobrl";
                }
            }
            String mount = String.format(MOUNT_COMMAND, backupRepoType, backupRepoAddress, mountDirectory);
            if (Objects.nonNull(mountOpts)) {
                mount += " -o " + mountOpts;
            if (Objects.nonNull(mountOptions) && !mountOptions.trim().isEmpty()) {
                mount += " -o " + mountOptions;
            }
            Script.runSimpleBashScript(mount);
        } catch (Exception e) {
@ -20,14 +20,14 @@
package com.cloud.hypervisor.kvm.resource.wrapper;

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.util.Set;
import java.util.HashSet;
import java.util.Arrays;

import com.cloud.agent.properties.AgentProperties;
import com.cloud.agent.properties.AgentPropertiesFileHandler;
import org.apache.cloudstack.storage.command.RevertSnapshotCommand;
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;

@ -53,6 +53,10 @@ import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.utils.Pair;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImgException;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.libvirt.LibvirtException;

@ResourceWrapper(handles = RevertSnapshotCommand.class)
public class LibvirtRevertSnapshotCommandWrapper extends CommandWrapper<RevertSnapshotCommand, Answer, LibvirtComputingResource> {
@ -80,7 +84,7 @@ public class LibvirtRevertSnapshotCommandWrapper extends CommandWrapper<RevertSn

        String volumePath = volume.getPath();
        String snapshotRelPath = snapshot.getPath();

        KVMStoragePool secondaryStoragePool = null;
        try {
            KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr();

@ -109,7 +113,6 @@ public class LibvirtRevertSnapshotCommandWrapper extends CommandWrapper<RevertSn
                rbd.close(image);
                rados.ioCtxDestroy(io);
            } else {
                KVMStoragePool secondaryStoragePool = null;
                if (snapshotImageStore != null && DataStoreRole.Primary != snapshotImageStore.getRole()) {
                    secondaryStoragePool = storagePoolMgr.getStoragePoolByURI(snapshotImageStore.getUrl());
                }

@ -125,7 +128,7 @@ public class LibvirtRevertSnapshotCommandWrapper extends CommandWrapper<RevertSn
                        return new Answer(command, false, result);
                    }
                } else {
                    revertVolumeToSnapshot(snapshotOnPrimaryStorage, snapshot, snapshotImageStore, primaryPool, secondaryStoragePool);
                    revertVolumeToSnapshot(secondaryStoragePool, snapshotOnPrimaryStorage, snapshot, primaryPool, libvirtComputingResource);
                }
            }

@ -138,7 +141,12 @@ public class LibvirtRevertSnapshotCommandWrapper extends CommandWrapper<RevertSn
        } catch (RbdException e) {
            logger.error("Failed to connect to revert snapshot due to RBD exception: ", e);
            return new Answer(command, false, e.toString());
        } finally {
            if (secondaryStoragePool != null) {
                libvirtComputingResource.getStoragePoolMgr().deleteStoragePool(secondaryStoragePool.getType(), secondaryStoragePool.getUuid());
            }
        }

    }

    /**

@ -152,8 +160,8 @@ public class LibvirtRevertSnapshotCommandWrapper extends CommandWrapper<RevertSn
    /**
     * Reverts the volume to the snapshot.
     */
    protected void revertVolumeToSnapshot(SnapshotObjectTO snapshotOnPrimaryStorage, SnapshotObjectTO snapshotOnSecondaryStorage, DataStoreTO dataStoreTo,
            KVMStoragePool kvmStoragePoolPrimary, KVMStoragePool kvmStoragePoolSecondary) {
    protected void revertVolumeToSnapshot(KVMStoragePool kvmStoragePoolSecondary, SnapshotObjectTO snapshotOnPrimaryStorage, SnapshotObjectTO snapshotOnSecondaryStorage,
            KVMStoragePool kvmStoragePoolPrimary, LibvirtComputingResource resource) {
        VolumeObjectTO volumeObjectTo = snapshotOnSecondaryStorage.getVolume();
        String volumePath = getFullPathAccordingToStorage(kvmStoragePoolPrimary, volumeObjectTo.getPath());

@ -161,13 +169,22 @@ public class LibvirtRevertSnapshotCommandWrapper extends CommandWrapper<RevertSn
        String snapshotPath = resultGetSnapshot.first();
        SnapshotObjectTO snapshotToPrint = resultGetSnapshot.second();

        Set<KVMStoragePool> storagePoolSet = null;
        if (kvmStoragePoolSecondary != null) {
            storagePoolSet = resource.connectToAllVolumeSnapshotSecondaryStorages(volumeObjectTo);
        }

        logger.debug(String.format("Reverting volume [%s] to snapshot [%s].", volumeObjectTo, snapshotToPrint));

        try {
            replaceVolumeWithSnapshot(volumePath, snapshotPath);
            logger.debug(String.format("Successfully reverted volume [%s] to snapshot [%s].", volumeObjectTo, snapshotToPrint));
        } catch (IOException ex) {
        } catch (LibvirtException | QemuImgException ex) {
            throw new CloudRuntimeException(String.format("Unable to revert volume [%s] to snapshot [%s] due to [%s].", volumeObjectTo, snapshotToPrint, ex.getMessage()), ex);
        } finally {
            if (storagePoolSet != null) {
                resource.disconnectAllVolumeSnapshotSecondaryStorages(storagePoolSet);
            }
        }
    }

@ -208,9 +225,15 @@ public class LibvirtRevertSnapshotCommandWrapper extends CommandWrapper<RevertSn

    /**
     * Replaces the current volume with the snapshot.
     * @throws IOException If can't replace the current volume with the snapshot.
     * @throws LibvirtException If can't replace the current volume with the snapshot.
     * @throws QemuImgException If can't replace the current volume with the snapshot.
     */
    protected void replaceVolumeWithSnapshot(String volumePath, String snapshotPath) throws IOException {
        Files.copy(Paths.get(snapshotPath), Paths.get(volumePath), StandardCopyOption.REPLACE_EXISTING);
    protected void replaceVolumeWithSnapshot(String volumePath, String snapshotPath) throws LibvirtException, QemuImgException {
        logger.debug(String.format("Replacing volume at [%s] with snapshot that is at [%s].", volumePath, snapshotPath));
        QemuImg qemuImg = new QemuImg(AgentPropertiesFileHandler.getPropertyValue(AgentProperties.REVERT_SNAPSHOT_TIMEOUT) * 1000);
        QemuImgFile volumeImg = new QemuImgFile(volumePath, QemuImg.PhysicalDiskFormat.QCOW2);
        QemuImgFile snapshotImg = new QemuImgFile(snapshotPath, QemuImg.PhysicalDiskFormat.QCOW2);

        qemuImg.convert(snapshotImg, volumeImg);
    }
}
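Note: the revert now delegates to qemu-img instead of a raw file copy, and the timeout handed to QemuImg comes from the revert.snapshot.timeout agent property (read in seconds and multiplied by 1000 to get milliseconds). A minimal sketch of that flow, with placeholder paths:

    // Illustrative only: both paths are placeholders for the resolved volume and snapshot files.
    int timeoutMs = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.REVERT_SNAPSHOT_TIMEOUT) * 1000;
    QemuImg qemuImg = new QemuImg(timeoutMs);
    qemuImg.convert(new QemuImgFile("/mnt/secondary/snapshots/2/10/snap-uuid", QemuImg.PhysicalDiskFormat.QCOW2),
            new QemuImgFile("/mnt/primary/volume-uuid", QemuImg.PhysicalDiskFormat.QCOW2));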
@ -21,9 +21,13 @@ package com.cloud.hypervisor.kvm.resource.wrapper;

import java.io.File;
import java.net.URISyntaxException;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

import com.cloud.agent.resource.virtualnetwork.VRScripts;
import com.cloud.utils.FileUtil;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.libvirt.Connect;
import org.libvirt.DomainInfo.DomainState;
import org.libvirt.LibvirtException;

@ -89,6 +93,12 @@ public final class LibvirtStartCommandWrapper extends CommandWrapper<StartComman

            libvirtComputingResource.applyDefaultNetworkRules(conn, vmSpec, false);

            if (vmSpec.getType() == VirtualMachine.Type.User) {
                List<VolumeObjectTO> volumes = Arrays.stream(vmSpec.getDisks()).filter(diskTO -> diskTO.getData() instanceof VolumeObjectTO).
                        map(diskTO -> (VolumeObjectTO) diskTO.getData()).collect(Collectors.toList());
                libvirtComputingResource.recreateCheckpointsOnVm(volumes, vmName, conn);
            }

            // pass cmdline info to system vms
            if (vmSpec.getType() != VirtualMachine.Type.User || (vmSpec.getBootArgs() != null && (vmSpec.getBootArgs().contains(UserVmManager.CKS_NODE) || vmSpec.getBootArgs().contains(UserVmManager.SHAREDFSVM)))) {
                // try to patch and SSH into the systemvm for up to 5 minutes
@ -34,7 +34,7 @@ public class LibvirtUnprepareStorageClientCommandWrapper extends CommandWrapper<
    @Override
    public Answer execute(UnprepareStorageClientCommand cmd, LibvirtComputingResource libvirtComputingResource) {
        final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr();
        Pair<Boolean, String> unprepareStorageClientResult = storagePoolMgr.unprepareStorageClient(cmd.getPoolType(), cmd.getPoolUuid());
        Pair<Boolean, String> unprepareStorageClientResult = storagePoolMgr.unprepareStorageClient(cmd.getPoolType(), cmd.getPoolUuid(), cmd.getDetails());
        if (!unprepareStorageClientResult.first()) {
            String msg = unprepareStorageClientResult.second();
            logger.debug("Couldn't unprepare storage client, due to: " + msg);
@ -16,6 +16,7 @@
// under the License.
package com.cloud.hypervisor.kvm.storage;

import java.io.File;
import java.util.List;
import java.util.Map;

@ -100,6 +101,10 @@ public interface KVMStoragePool {

    public Map<String, String> getDetails();

    default String getLocalPathFor(String relativePath) {
        return String.format("%s%s%s", getLocalPath(), File.separator, relativePath);
    }

    public boolean isPoolSupportHA();

    public String getHearthBeatPath();
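Note: getLocalPathFor is a small default helper for building host-local paths from a pool-relative path. A minimal usage sketch (the pool lookup and the relative path are placeholders):

    // Illustrative only.
    KVMStoragePool pool = storagePoolMgr.getStoragePool(primaryStore.getPoolType(), primaryStore.getUuid());
    String absolutePath = pool.getLocalPathFor(volume.getPath());
    // equivalent to pool.getLocalPath() + File.separator + volume.getPath()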
@ -408,12 +408,26 @@ public class KVMStoragePoolManager {

    public boolean deleteStoragePool(StoragePoolType type, String uuid) {
        StorageAdaptor adaptor = getStorageAdaptor(type);
        _haMonitor.removeStoragePool(uuid);
        adaptor.deleteStoragePool(uuid);
        if (type == StoragePoolType.NetworkFilesystem) {
            _haMonitor.removeStoragePool(uuid);
        }
        boolean deleteStatus = adaptor.deleteStoragePool(uuid);
        synchronized (_storagePools) {
            _storagePools.remove(uuid);
        }
        return true;
        return deleteStatus;
    }

    public boolean deleteStoragePool(StoragePoolType type, String uuid, Map<String, String> details) {
        StorageAdaptor adaptor = getStorageAdaptor(type);
        if (type == StoragePoolType.NetworkFilesystem) {
            _haMonitor.removeStoragePool(uuid);
        }
        boolean deleteStatus = adaptor.deleteStoragePool(uuid, details);
        synchronized (_storagePools) {
            _storagePools.remove(uuid);
        }
        return deleteStatus;
    }

    public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template, String name, Storage.ProvisioningType provisioningType,

@ -477,11 +491,11 @@ public class KVMStoragePoolManager {

    public Ternary<Boolean, Map<String, String>, String> prepareStorageClient(StoragePoolType type, String uuid, Map<String, String> details) {
        StorageAdaptor adaptor = getStorageAdaptor(type);
        return adaptor.prepareStorageClient(type, uuid, details);
        return adaptor.prepareStorageClient(uuid, details);
    }

    public Pair<Boolean, String> unprepareStorageClient(StoragePoolType type, String uuid) {
    public Pair<Boolean, String> unprepareStorageClient(StoragePoolType type, String uuid, Map<String, String> details) {
        StorageAdaptor adaptor = getStorageAdaptor(type);
        return adaptor.unprepareStorageClient(type, uuid);
        return adaptor.unprepareStorageClient(uuid, details);
    }
}
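Note: the new deleteStoragePool overload forwards adaptor-specific details and now unregisters the HA monitor only for NFS pools; callers such as the ModifyStoragePool wrapper above pass the details straight from the command. A hedged caller-side sketch:

    // Illustrative only: the content of the details map depends on the storage adaptor in use.
    boolean removed = storagePoolMgr.deleteStoragePool(pool.getType(), pool.getUuid(), command.getDetails());
    if (!removed) {
        logger.warn("Storage adaptor did not delete pool " + pool.getUuid());
    }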
@ -25,9 +25,11 @@ import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.attribute.PosixFilePermissions;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.ArrayList;

@ -42,8 +44,17 @@ import java.util.UUID;
import java.util.stream.Collectors;

import javax.naming.ConfigurationException;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.transform.TransformerException;
import javax.xml.xpath.XPath;
import javax.xml.xpath.XPathConstants;
import javax.xml.xpath.XPathExpressionException;
import javax.xml.xpath.XPathFactory;

import com.cloud.agent.api.Command;
import com.cloud.hypervisor.kvm.resource.LibvirtXMLParser;
import org.apache.cloudstack.agent.directdownload.DirectDownloadAnswer;
import org.apache.cloudstack.agent.directdownload.DirectDownloadCommand;
import org.apache.cloudstack.direct.download.DirectDownloadHelper;

@ -79,9 +90,12 @@ import org.apache.cloudstack.utils.qemu.QemuImgException;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.apache.cloudstack.utils.qemu.QemuObject;
import org.apache.cloudstack.utils.qemu.QemuObject.EncryptFormat;
import org.apache.cloudstack.utils.security.ParserUtils;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.BooleanUtils;
import org.apache.commons.lang3.ObjectUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.commons.lang3.builder.ToStringStyle;

@ -143,7 +157,10 @@ import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script;
import com.cloud.utils.storage.S3.S3Utils;
import com.cloud.vm.VmDetailConstants;

import org.w3c.dom.Document;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.SAXException;

public class KVMStorageProcessor implements StorageProcessor {
    protected Logger logger = LogManager.getLogger(getClass());
@ -165,6 +182,47 @@ public class KVMStorageProcessor implements StorageProcessor {
     */
    private final long waitDelayForVirshCommands = 1000L;

    private int incrementalSnapshotTimeout;

    private static final String CHECKPOINT_XML_TEMP_DIR = "/tmp/cloudstack/checkpointXMLs";

    private static final String BACKUP_XML_TEMP_DIR = "/tmp/cloudstack/backupXMLs";

    private static final String BACKUP_BEGIN_COMMAND = "virsh backup-begin --domain %s --backupxml %s --checkpointxml %s";

    private static final String BACKUP_XML = "<domainbackup><disks><disk name='%s' type='file'><target file='%s'/><driver type='qcow2'/></disk></disks></domainbackup>";

    private static final String INCREMENTAL_BACKUP_XML = "<domainbackup><incremental>%s</incremental><disks><disk name='%s' type='file'><target file='%s'/><driver type='qcow2'/></disk></disks></domainbackup>";

    private static final String CHECKPOINT_XML = "<domaincheckpoint><name>%s</name><disks><disk name='%s' checkpoint='bitmap'/></disks></domaincheckpoint>";

    private static final String CHECKPOINT_DUMP_XML_COMMAND = "virsh checkpoint-dumpxml --domain %s --checkpointname %s --no-domain";

    private static final String DOMJOBINFO_COMPLETED_COMMAND = "virsh domjobinfo --domain %s --completed";

    private static final String DOMJOBABORT_COMMAND = "virsh domjobabort --domain %s";

    private static final String DUMMY_VM_XML = "<domain type='qemu'>\n" +
            "    <name>%s</name>\n" +
            "    <memory unit='MiB'>256</memory>\n" +
            "    <currentMemory unit='MiB'>256</currentMemory>\n" +
            "    <vcpu>1</vcpu>\n" +
            "    <os>\n" +
            "        <type arch='%s' machine='%s'>hvm</type>\n" +
            "        <boot dev='hd'/>\n" +
            "    </os>\n" +
            "    <devices>\n" +
            "        <emulator>%s</emulator>\n" +
            "        <disk type='file' device='disk'>\n" +
            "            <driver name='qemu' type='qcow2' cache='none'/>\n" +
            "            <source file='%s'/>\n" +
            "            <target dev='sda'/>\n" +
            "        </disk>\n" +
            "        <graphics type='vnc' port='-1'/>\n" +
            "    </devices>\n" +
            "</domain>";


    public KVMStorageProcessor(final KVMStoragePoolManager storagePoolMgr, final LibvirtComputingResource resource) {
        this.storagePoolMgr = storagePoolMgr;
        this.resource = resource;

@ -191,6 +249,8 @@ public class KVMStorageProcessor implements StorageProcessor {
        }

        _cmdsTimeout = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.CMDS_TIMEOUT) * 1000;

        incrementalSnapshotTimeout = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.INCREMENTAL_SNAPSHOT_TIMEOUT) * 1000;
        return true;
    }

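Note: DUMMY_VM_XML is expanded positionally (name, guest arch, machine type, emulator path, disk source) by getVmXml further down in this diff. A hedged example of the expansion, with all concrete values being placeholders:

    // Illustrative only: arch, machine type and emulator path are assumptions for this example.
    String vmName = String.format("DUMMY-VM-%s", snapshotName);
    String vmXml = String.format(DUMMY_VM_XML,
            vmName,                                             // <name>
            "x86_64",                                           // <type arch=...>
            "pc",                                               // machine type (VIRT is used on aarch64 hosts)
            "/usr/bin/qemu-kvm",                                // <emulator>
            primaryPool.getLocalPathFor(volume.getPath()));     // <source file=...>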
@ -1572,6 +1632,8 @@ public class KVMStorageProcessor implements StorageProcessor {
                    vol.getIopsReadRate(), vol.getIopsReadRateMax(), vol.getIopsReadRateMaxLength(),
                    vol.getIopsWriteRate(), vol.getIopsWriteRateMax(), vol.getIopsWriteRateMaxLength(), volCacheMode, encryptDetails, disk.getDetails());

            resource.recreateCheckpointsOnVm(List.of((VolumeObjectTO) disk.getData()), vmName, conn);

            return new AttachAnswer(disk);
        } catch (final LibvirtException e) {
            logger.debug(String.format("Failed to attach volume [id: %d, uuid: %s, name: %s, path: %s], due to ",

@ -1611,6 +1673,8 @@ public class KVMStorageProcessor implements StorageProcessor {

            storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), vol.getPath());

            resource.removeCheckpointsOnVm(vmName, vol.getUuid(), vol.getCheckpointPaths());

            return new DettachAnswer(disk);
        } catch (final LibvirtException | InternalErrorException | CloudRuntimeException e) {
            logger.debug(String.format("Failed to detach volume [id: %d, uuid: %s, name: %s, path: %s], due to ", vol.getId(), vol.getUuid(), vol.getName(), vol.getPath()), e);

@ -1756,6 +1820,7 @@ public class KVMStorageProcessor implements StorageProcessor {
    public Answer createSnapshot(final CreateObjectCommand cmd) {
        final SnapshotObjectTO snapshotTO = (SnapshotObjectTO)cmd.getData();
        final PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)snapshotTO.getDataStore();
        DataStoreTO imageStoreTo = snapshotTO.getImageStore();
        final VolumeObjectTO volume = snapshotTO.getVolume();
        final String snapshotName = UUID.randomUUID().toString();
        final String vmName = volume.getVmName();
@ -1773,101 +1838,45 @@ public class KVMStorageProcessor implements StorageProcessor {
                }
            }

            if (state == DomainInfo.DomainState.VIR_DOMAIN_RUNNING && volume.requiresEncryption()) {
            if (DomainInfo.DomainState.VIR_DOMAIN_RUNNING.equals(state) && volume.requiresEncryption()) {
                throw new CloudRuntimeException("VM is running, encrypted volume snapshots aren't supported");
            }

            final KVMStoragePool primaryPool = storagePoolMgr.getStoragePool(primaryStore.getPoolType(), primaryStore.getUuid());
            KVMStoragePool primaryPool = storagePoolMgr.getStoragePool(primaryStore.getPoolType(), primaryStore.getUuid());
            KVMStoragePool secondaryPool = imageStoreTo != null ? storagePoolMgr.getStoragePoolByURI(imageStoreTo.getUrl()) : null;

            final KVMPhysicalDisk disk = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), volume.getPath());
            KVMPhysicalDisk disk = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), volume.getPath());

            String diskPath = disk.getPath();
            String snapshotPath = diskPath + File.separator + snapshotName;
            if (state == DomainInfo.DomainState.VIR_DOMAIN_RUNNING && !primaryPool.isExternalSnapshot()) {

                validateAvailableSizeOnPoolToTakeVolumeSnapshot(primaryPool, disk);

                try {
                    snapshotPath = getSnapshotPathInPrimaryStorage(primaryPool.getLocalPath(), snapshotName);

                    String diskLabel = takeVolumeSnapshot(resource.getDisks(conn, vmName), snapshotName, diskPath, vm);
                    String convertResult = convertBaseFileToSnapshotFileInPrimaryStorageDir(primaryPool, disk, snapshotPath, volume, cmd.getWait());

                    mergeSnapshotIntoBaseFile(vm, diskLabel, diskPath, snapshotName, volume, conn);

                    validateConvertResult(convertResult, snapshotPath);
                } catch (LibvirtException e) {
                    if (!e.getMessage().contains(LIBVIRT_OPERATION_NOT_SUPPORTED_MESSAGE)) {
                        throw e;
                    }

                    logger.info(String.format("It was not possible to take live disk snapshot for volume [%s], in VM [%s], due to [%s]. We will take full snapshot of the VM"
                            + " and extract the disk instead. Consider upgrading your QEMU binary.", volume, vmName, e.getMessage()));

                    takeFullVmSnapshotForBinariesThatDoesNotSupportLiveDiskSnapshot(vm, snapshotName, vmName);
                    primaryPool.createFolder(TemplateConstants.DEFAULT_SNAPSHOT_ROOT_DIR);
                    extractDiskFromFullVmSnapshot(disk, volume, snapshotPath, snapshotName, vmName, vm);
                }

                /*
                 * libvirt on RHEL6 doesn't handle resume event emitted from
                 * qemu
                 */
                vm = resource.getDomain(conn, vmName);
                state = vm.getInfo().state;
                if (state == DomainInfo.DomainState.VIR_DOMAIN_PAUSED) {
                    vm.resume();
            SnapshotObjectTO newSnapshot = new SnapshotObjectTO();
            if (DomainInfo.DomainState.VIR_DOMAIN_RUNNING.equals(state) && !primaryPool.isExternalSnapshot()) {
                if (snapshotTO.isKvmIncrementalSnapshot()) {
                    newSnapshot = takeIncrementalVolumeSnapshotOfRunningVm(snapshotTO, primaryPool, secondaryPool, imageStoreTo != null ? imageStoreTo.getUrl() : null, snapshotName, volume, vm, conn, cmd.getWait());
                } else {
                    newSnapshot = takeFullVolumeSnapshotOfRunningVm(cmd, primaryPool, secondaryPool, disk, snapshotName, conn, vmName, diskPath, vm, volume, snapshotPath);
                }
            } else {
                /**
                 * For RBD we can't use libvirt to do our snapshotting or any Bash scripts.
                 * libvirt also wants to store the memory contents of the Virtual Machine,
                 * but that's not possible with RBD since there is no way to store the memory
                 * contents in RBD.
                 *
                 * So we rely on the Java bindings for RBD to create our snapshot
                 *
                 * This snapshot might not be 100% consistent due to writes still being in the
                 * memory of the Virtual Machine, but if the VM runs a kernel which supports
                 * barriers properly (>2.6.32) this won't be any different then pulling the power
                 * cord out of a running machine.
                 */
                if (primaryPool.getType() == StoragePoolType.RBD) {
                    try {
                        Rados r = radosConnect(primaryPool);

                        final IoCTX io = r.ioCtxCreate(primaryPool.getSourceDir());
                        final Rbd rbd = new Rbd(io);
                        final RbdImage image = rbd.open(disk.getName());

                        logger.debug("Attempting to create RBD snapshot " + disk.getName() + "@" + snapshotName);
                        image.snapCreate(snapshotName);

                        rbd.close(image);
                        r.ioCtxDestroy(io);
                    } catch (final Exception e) {
                        logger.error("A RBD snapshot operation on " + disk.getName() + " failed. The error was: " + e.getMessage());
                    }
                    takeRbdVolumeSnapshotOfStoppedVm(primaryPool, disk, snapshotName);
                    newSnapshot.setPath(snapshotPath);
                } else if (primaryPool.getType() == StoragePoolType.CLVM) {
                    /* VM is not running, create a snapshot by ourself */
                    final Script command = new Script(_manageSnapshotPath, _cmdsTimeout, logger);
                    command.add(MANAGE_SNAPSTHOT_CREATE_OPTION, disk.getPath());
                    command.add(NAME_OPTION, snapshotName);
                    final String result = command.execute();
                    if (result != null) {
                        logger.debug("Failed to manage snapshot: " + result);
                        return new CreateObjectAnswer("Failed to manage snapshot: " + result);
                    }
                    CreateObjectAnswer result = takeClvmVolumeSnapshotOfStoppedVm(disk, snapshotName);
                    if (result != null) return result;
                    newSnapshot.setPath(snapshotPath);
                } else {
                    snapshotPath = getSnapshotPathInPrimaryStorage(primaryPool.getLocalPath(), snapshotName);
                    String convertResult = convertBaseFileToSnapshotFileInPrimaryStorageDir(primaryPool, disk, snapshotPath, volume, cmd.getWait());
                    validateConvertResult(convertResult, snapshotPath);
                    if (snapshotTO.isKvmIncrementalSnapshot()) {
                        newSnapshot = takeIncrementalVolumeSnapshotOfStoppedVm(snapshotTO, primaryPool, secondaryPool, imageStoreTo != null ? imageStoreTo.getUrl() : null, snapshotName, volume, conn, cmd.getWait());
                    } else {
                        newSnapshot = takeFullVolumeSnapshotOfStoppedVm(cmd, primaryPool, secondaryPool, snapshotName, disk, volume);
                    }
                }
            }

            final SnapshotObjectTO newSnapshot = new SnapshotObjectTO();
            if (secondaryPool != null) {
                storagePoolMgr.deleteStoragePool(secondaryPool.getType(), secondaryPool.getUuid());
            }

            newSnapshot.setPath(snapshotPath);
            return new CreateObjectAnswer(newSnapshot);
        } catch (CloudRuntimeException | LibvirtException | IOException ex) {
            String errorMsg = String.format("Failed to take snapshot for volume [%s], in VM [%s], due to [%s].", volume, vmName, ex.getMessage());
@ -1878,6 +1887,504 @@ public class KVMStorageProcessor implements StorageProcessor {
        }
    }

    private SnapshotObjectTO createSnapshotToAndUpdatePathAndSize(String path, String fullPath) {
        final File snapFile = new File(fullPath);
        long size = 0;

        if (snapFile.exists()) {
            size = snapFile.length();
        }

        SnapshotObjectTO snapshotObjectTo = new SnapshotObjectTO();

        snapshotObjectTo.setPath(path);
        snapshotObjectTo.setPhysicalSize(size);

        return snapshotObjectTo;
    }

    private SnapshotObjectTO takeIncrementalVolumeSnapshotOfStoppedVm(SnapshotObjectTO snapshotObjectTO, KVMStoragePool primaryPool, KVMStoragePool secondaryPool,
            String secondaryPoolUrl, String snapshotName, VolumeObjectTO volumeObjectTo, Connect conn, int wait) throws LibvirtException {
        resource.validateLibvirtAndQemuVersionForIncrementalSnapshots();
        Domain vm = null;
        logger.debug("Taking incremental volume snapshot of volume [{}]. Snapshot will be copied to [{}].", volumeObjectTo,
                ObjectUtils.defaultIfNull(secondaryPool, primaryPool));
        try {
            String vmName = String.format("DUMMY-VM-%s", snapshotName);

            String vmXml = getVmXml(primaryPool, volumeObjectTo, vmName);

            logger.debug("Creating dummy VM with volume [{}] to take an incremental snapshot of it.", volumeObjectTo);
            resource.startVM(conn, vmName, vmXml, Domain.CreateFlags.PAUSED);

            vm = resource.getDomain(conn, vmName);

            resource.recreateCheckpointsOnVm(List.of(volumeObjectTo), vmName, conn);

            return takeIncrementalVolumeSnapshotOfRunningVm(snapshotObjectTO, primaryPool, secondaryPool, secondaryPoolUrl, snapshotName, volumeObjectTo, vm, conn, wait);
        } catch (InternalErrorException | LibvirtException | CloudRuntimeException e) {
            logger.error("Failed to take incremental volume snapshot of volume [{}] due to {}.", volumeObjectTo, e.getMessage(), e);
            throw new CloudRuntimeException(e);
        } finally {
            if (vm != null) {
                vm.destroy();
            }
        }
    }

    private String getVmXml(KVMStoragePool primaryPool, VolumeObjectTO volumeObjectTo, String vmName) {
        String machine = resource.isGuestAarch64() ? LibvirtComputingResource.VIRT : LibvirtComputingResource.PC;
        String cpuArch = resource.getGuestCpuArch() != null ? resource.getGuestCpuArch() : "x86_64";

        return String.format(DUMMY_VM_XML, vmName, cpuArch, machine, resource.getHypervisorPath(), primaryPool.getLocalPathFor(volumeObjectTo.getPath()));
    }

    private SnapshotObjectTO takeIncrementalVolumeSnapshotOfRunningVm(SnapshotObjectTO snapshotObjectTO, KVMStoragePool primaryPool, KVMStoragePool secondaryPool,
            String secondaryPoolUrl, String snapshotName, VolumeObjectTO volumeObjectTo, Domain vm, Connect conn, int wait) {
        logger.debug("Taking incremental volume snapshot of volume [{}] attached to running VM [{}]. Snapshot will be copied to [{}].", volumeObjectTo, volumeObjectTo.getVmName(),
                ObjectUtils.defaultIfNull(secondaryPool, primaryPool));
        resource.validateLibvirtAndQemuVersionForIncrementalSnapshots();

        Pair<String, String> fullSnapshotPathAndDirPath = getFullSnapshotOrCheckpointPathAndDirPathOnCorrectStorage(primaryPool, secondaryPool, snapshotName, volumeObjectTo, false);

        String diskLabel;
        String vmName;
        try {
            List<DiskDef> disks = resource.getDisks(conn, vm.getName());
            diskLabel = getDiskLabelToSnapshot(disks, volumeObjectTo.getPath(), vm);
            vmName = vm.getName();
        } catch (LibvirtException e) {
            logger.error("Failed to get VM's disks or VM name due to: [{}].", e.getMessage(), e);
            throw new CloudRuntimeException(e);
        }

        String[] parents = snapshotObjectTO.getParents();
        String fullSnapshotPath = fullSnapshotPathAndDirPath.first();

        String backupXml = generateBackupXml(volumeObjectTo, parents, diskLabel, fullSnapshotPath);
        String checkpointXml = String.format(CHECKPOINT_XML, snapshotName, diskLabel);

        Path backupXmlPath = createFileAndWrite(backupXml, BACKUP_XML_TEMP_DIR, snapshotName);
        Path checkpointXmlPath = createFileAndWrite(checkpointXml, CHECKPOINT_XML_TEMP_DIR, snapshotName);

        String backupCommand = String.format(BACKUP_BEGIN_COMMAND, vmName, backupXmlPath.toString(), checkpointXmlPath.toString());

        createFolderOnCorrectStorage(primaryPool, secondaryPool, fullSnapshotPathAndDirPath);

        if (Script.runSimpleBashScript(backupCommand) == null) {
            throw new CloudRuntimeException(String.format("Error backing up using backupXML [%s], checkpointXML [%s] for volume [%s].", backupXml, checkpointXml,
                    volumeObjectTo));
        }

        try {
            waitForBackup(vmName);
        } catch (CloudRuntimeException ex) {
            cancelBackupJob(snapshotObjectTO);
            throw ex;
        }

        rebaseSnapshot(snapshotObjectTO, secondaryPool, secondaryPoolUrl, fullSnapshotPath, snapshotName, parents, wait);

        try {
            Files.setPosixFilePermissions(Path.of(fullSnapshotPath), PosixFilePermissions.fromString("rw-r--r--"));
        } catch (IOException ex) {
            logger.warn("Failed to change permissions of snapshot [{}], snapshot download will not be possible.", snapshotName);
        }

        String checkpointPath = dumpCheckpoint(primaryPool, secondaryPool, snapshotName, volumeObjectTo, vmName, parents);

        SnapshotObjectTO result = createSnapshotToAndUpdatePathAndSize(secondaryPool == null ? fullSnapshotPath : fullSnapshotPathAndDirPath.second() + File.separator + snapshotName,
                fullSnapshotPath);

        result.setCheckpointPath(checkpointPath);

        return result;
    }

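Note: to make the virsh interaction above concrete, this is roughly what the expanded backup and checkpoint requests look like for a hypothetical disk label and parent checkpoint (all values are placeholders, not captured output):

    // Illustrative only.
    String backupXml = String.format(INCREMENTAL_BACKUP_XML,
            "parent-snapshot-uuid",                              // <incremental> parent checkpoint name
            "vda",                                               // disk label from the domain XML
            "/mnt/secondary/snapshots/2/10/snapshot-uuid");      // <target file=...>
    String checkpointXml = String.format(CHECKPOINT_XML, "snapshot-uuid", "vda");
    // then: virsh backup-begin --domain <vmName> --backupxml <backup.xml> --checkpointxml <checkpoint.xml>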
    protected void createFolderOnCorrectStorage(KVMStoragePool primaryPool, KVMStoragePool secondaryPool, Pair<String, String> fullSnapshotPathAndDirPath) {
        if (secondaryPool == null) {
            primaryPool.createFolder(fullSnapshotPathAndDirPath.second());
        } else {
            secondaryPool.createFolder(fullSnapshotPathAndDirPath.second());
        }
    }

    protected String generateBackupXml(VolumeObjectTO volumeObjectTo, String[] parents, String diskLabel, String fullSnapshotPath) {
        if (parents == null) {
            logger.debug("Snapshot of volume [{}] does not have a parent, taking a full snapshot.", volumeObjectTo);
            return String.format(BACKUP_XML, diskLabel, fullSnapshotPath);
        } else {
            logger.debug("Snapshot of volume [{}] has parents [{}], taking an incremental snapshot.", volumeObjectTo, Arrays.toString(parents));
            String parentCheckpointName = getParentCheckpointName(parents);
            return String.format(INCREMENTAL_BACKUP_XML, parentCheckpointName, diskLabel, fullSnapshotPath);
        }
    }

    private void waitForBackup(String vmName) throws CloudRuntimeException {
        int timeout = incrementalSnapshotTimeout;
        logger.debug("Waiting for backup of VM [{}] to finish, timeout is [{}].", vmName, timeout);

        String result;

        while (timeout > 0) {
            result = checkBackupJob(vmName);

            if (result.contains("Completed") && result.contains("Backup")) {
                return;
            }

            timeout -= 10000;
            try {
                Thread.sleep(10000);
            } catch (InterruptedException e) {
                throw new CloudRuntimeException(e);
            }
        }

        throw new CloudRuntimeException(String.format("Timeout while waiting for incremental snapshot for VM [%s] to finish.", vmName));
    }

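Note: the wait loop above scrapes the completed-job list rather than registering for libvirt events; it calls checkBackupJob roughly every 10 seconds until the timeout (held in milliseconds) is used up. A minimal sketch of a single poll, with a placeholder VM name:

    // Illustrative only: the VM name is a placeholder.
    String jobInfo = Script.runSimpleBashScriptWithFullResult(
            String.format(DOMJOBINFO_COMPLETED_COMMAND, "i-2-10-VM"), 10);
    boolean backupFinished = jobInfo.contains("Backup") && jobInfo.contains("Completed");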
    private void cancelBackupJob(SnapshotObjectTO snapshotObjectTO) {
        Script.runSimpleBashScript(String.format(DOMJOBABORT_COMMAND, snapshotObjectTO.getVmName()));

        String result = checkBackupJob(snapshotObjectTO.getVmName());

        if (result.contains("Backup") && result.contains("Cancelled")) {
            logger.debug("Successfully canceled incremental snapshot job.");
        } else {
            logger.warn("Couldn't cancel the incremental snapshot job correctly. Job status is [{}].", result);
        }
    }

    private String checkBackupJob(String vmName) {
        return Script.runSimpleBashScriptWithFullResult(String.format(DOMJOBINFO_COMPLETED_COMMAND, vmName), 10);
    }

    protected void rebaseSnapshot(SnapshotObjectTO snapshotObjectTO, KVMStoragePool secondaryPool, String secondaryUrl, String snapshotPath, String snapshotName, String[] parents, int wait) {
        if (parents == null) {
            logger.debug("No need to rebase snapshot [{}], this snapshot has no parents, therefore it is the first on its backing chain.", snapshotName);
            return;
        }
        String parentSnapshotPath;

        if (secondaryPool == null) {
            parentSnapshotPath = parents[parents.length - 1];
        } else if (!secondaryUrl.equals(snapshotObjectTO.getParentStore().getUrl())) {
            KVMStoragePool parentPool = storagePoolMgr.getStoragePoolByURI(snapshotObjectTO.getParentStore().getUrl());
            parentSnapshotPath = parentPool.getLocalPath() + File.separator + parents[parents.length - 1];
            storagePoolMgr.deleteStoragePool(parentPool.getType(), parentPool.getUuid());
        } else {
            parentSnapshotPath = secondaryPool.getLocalPath() + File.separator + parents[parents.length - 1];
        }

        QemuImgFile snapshotFile = new QemuImgFile(snapshotPath);
        QemuImgFile parentSnapshotFile = new QemuImgFile(parentSnapshotPath);

        logger.debug("Rebasing snapshot [{}] with parent [{}].", snapshotName, parentSnapshotPath);

        try {
            QemuImg qemuImg = new QemuImg(wait);
            qemuImg.rebase(snapshotFile, parentSnapshotFile, PhysicalDiskFormat.QCOW2.toString(), false);
        } catch (LibvirtException | QemuImgException e) {
            logger.error("Exception while rebasing incremental snapshot [{}] due to: [{}].", snapshotName, e.getMessage(), e);
            throw new CloudRuntimeException(e);
        }
    }

    protected String getParentCheckpointName(String[] parents) {
        String immediateParentPath = parents[parents.length - 1];
        return immediateParentPath.substring(immediateParentPath.lastIndexOf(File.separator) + 1);
    }

    private Path createFileAndWrite(String content, String dir, String fileName) {
        File dirFile = new File(dir);
        if (!dirFile.exists()) {
            dirFile.mkdirs();
        }

        Path filePath = Path.of(dirFile.getPath(), fileName);
        try {
            return Files.write(filePath, content.getBytes());
        } catch (IOException ex) {
            String message = String.format("Error while writing file [%s].", filePath);
            logger.error(message, ex);
            throw new CloudRuntimeException(message, ex);
        }
    }

    private String dumpCheckpoint(KVMStoragePool primaryPool, KVMStoragePool secondaryPool, String snapshotName, VolumeObjectTO volumeObjectTo, String vmName, String[] snapshotParents) {
        String result = Script.runSimpleBashScriptWithFullResult(String.format(CHECKPOINT_DUMP_XML_COMMAND, vmName, snapshotName), 10);

        String snapshotParent = null;
        if (snapshotParents != null) {
            String snapshotParentPath = snapshotParents[snapshotParents.length - 1];
            snapshotParent = snapshotParentPath.substring(snapshotParentPath.lastIndexOf(File.separator) + 1);
        }

        return cleanupCheckpointXmlDumpCheckpointAndRedefine(result, primaryPool, secondaryPool, snapshotName, volumeObjectTo, snapshotParent, vmName);
    }

    private String cleanupCheckpointXmlDumpCheckpointAndRedefine(String checkpointXml, KVMStoragePool primaryPool, KVMStoragePool secondaryPool, String snapshotName, VolumeObjectTO volumeObjectTo, String snapshotParent, String vmName) {
        String updatedCheckpointXml;
        try {
            updatedCheckpointXml = updateCheckpointXml(checkpointXml, snapshotParent);
        } catch (TransformerException | ParserConfigurationException | IOException | SAXException |
                 XPathExpressionException e) {
            logger.error("Exception while parsing checkpoint XML [{}].", checkpointXml, e);
            throw new CloudRuntimeException(e);
        }

        Pair<String, String> checkpointFullPathAndDirPath = getFullSnapshotOrCheckpointPathAndDirPathOnCorrectStorage(primaryPool, secondaryPool, snapshotName, volumeObjectTo, true);

        String fullPath = checkpointFullPathAndDirPath.first();
        String dirPath = checkpointFullPathAndDirPath.second();

        KVMStoragePool workPool = ObjectUtils.defaultIfNull(secondaryPool, primaryPool);
        workPool.createFolder(dirPath);

        logger.debug("Saving checkpoint of volume [{}], attached to VM [{}], referring to snapshot [{}] to path [{}].", volumeObjectTo, vmName, snapshotName, fullPath);
        createFileAndWrite(updatedCheckpointXml, workPool.getLocalPath() + File.separator + dirPath, snapshotName);

        logger.debug("Redefining checkpoint on VM [{}].", vmName);
        Script.runSimpleBashScript(String.format(LibvirtComputingResource.CHECKPOINT_CREATE_COMMAND, vmName, fullPath));

        return fullPath;
    }

    /**
     * Updates the checkpoint XML, setting the parent to {@code snapshotParent} and removing any disks that were not backed up.
     * @param checkpointXml checkpoint XML to be parsed
     * @param snapshotParent snapshot parent
     * */
    private String updateCheckpointXml(String checkpointXml, String snapshotParent) throws ParserConfigurationException, XPathExpressionException, IOException, SAXException, TransformerException {
        logger.debug("Parsing checkpoint XML [{}].", checkpointXml);

        InputStream in = IOUtils.toInputStream(checkpointXml);
        DocumentBuilderFactory docFactory = ParserUtils.getSaferDocumentBuilderFactory();
        DocumentBuilder docBuilder = docFactory.newDocumentBuilder();
        Document doc = docBuilder.parse(in);
        XPath xPath = XPathFactory.newInstance().newXPath();

        updateParent(snapshotParent, doc, xPath);

        removeUnnecessaryDisks(doc, xPath);

        String finalXml = LibvirtXMLParser.getXml(doc);

        logger.debug("Checkpoint XML after parsing is [{}].", finalXml);

        return finalXml;
    }

    /**
     * Removes all the disk definitions on the checkpoint XML from disks that were not affected.
     * @param checkpointXml the checkpoint XML to be updated.
     * */
    private void removeUnnecessaryDisks(Document checkpointXml, XPath xPath) throws XPathExpressionException {
        Node disksNode = (Node) xPath.compile("/domaincheckpoint/disks").evaluate(checkpointXml, XPathConstants.NODE);
        NodeList disksNodeChildren = disksNode.getChildNodes();
        for (int j = 0; j < disksNodeChildren.getLength(); j++) {
            Node diskNode = disksNodeChildren.item(j);
            if (diskNode == null) {
                continue;
            }
            if ("disk".equals(diskNode.getNodeName()) && "no".equals(diskNode.getAttributes().getNamedItem("checkpoint").getNodeValue())) {
                disksNode.removeChild(diskNode);
                logger.trace("Removing node [{}].", diskNode);
            }
        }
    }

    /**
     * Updates the parent on the {@code checkpointXml} to {@code snapshotParent}. If {@code snapshotParent} is null, removes the parent.
     * @param checkpointXml the checkpoint XML to be updated
     * @param snapshotParent the snapshot parent; pass null if there is no parent.
     * */
    private void updateParent(String snapshotParent, Document checkpointXml, XPath xPath) throws XPathExpressionException {
        if (snapshotParent == null) {
            Object parentNodeObject = xPath.compile("/domaincheckpoint/parent").evaluate(checkpointXml, XPathConstants.NODE);
            if (parentNodeObject == null) {
                return;
            }
            Node parentNode = (Node) parentNodeObject;
            parentNode.getParentNode().removeChild(parentNode);
            return;
        }

        Node parentNameNode = (Node) xPath.compile("/domaincheckpoint/parent/name").evaluate(checkpointXml, XPathConstants.NODE);
        parentNameNode.setTextContent(snapshotParent);
    }

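Note: for illustration, a plausible before/after of the checkpoint XML handled by updateCheckpointXml (the XML below is hand-written, not libvirt output):

    // Input dumped via checkpoint-dumpxml (hand-written example):
    //   <domaincheckpoint>
    //     <name>snapshot-uuid</name>
    //     <parent><name>stale-parent</name></parent>
    //     <disks>
    //       <disk name='vda' checkpoint='bitmap'/>
    //       <disk name='vdb' checkpoint='no'/>
    //     </disks>
    //   </domaincheckpoint>
    // After updateCheckpointXml(xml, "parent-snapshot-uuid"): the <parent> name is replaced with
    // "parent-snapshot-uuid" and the <disk name='vdb'> entry (checkpoint='no') is removed, so the
    // redefined checkpoint only references the disk that was actually backed up.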
    /**
     * If imageStore is not null, copy the snapshot directly to secondary storage; otherwise, copy it to the primary storage.
     *
     * @return SnapshotObjectTO of the new snapshot.
     * */
    private SnapshotObjectTO takeFullVolumeSnapshotOfRunningVm(CreateObjectCommand cmd, KVMStoragePool primaryPool, KVMStoragePool secondaryPool, KVMPhysicalDisk disk, String snapshotName,
            Connect conn, String vmName, String diskPath, Domain vm, VolumeObjectTO volume, String snapshotPath) throws IOException, LibvirtException {
        logger.debug("Taking full volume snapshot of volume [{}] attached to running VM [{}]. Snapshot will be copied to [{}].", volume, vmName,
                ObjectUtils.defaultIfNull(secondaryPool, primaryPool));

        validateAvailableSizeOnPoolToTakeVolumeSnapshot(primaryPool, disk);
        String relativePath = null;
        try {
            String diskLabel = takeVolumeSnapshot(resource.getDisks(conn, vmName), snapshotName, diskPath, vm);

            Pair<String, String> fullSnapPathAndDirPath = getFullSnapshotOrCheckpointPathAndDirPathOnCorrectStorage(primaryPool, secondaryPool, snapshotName, volume, false);

            snapshotPath = fullSnapPathAndDirPath.first();
            String directoryPath = fullSnapPathAndDirPath.second();
            relativePath = directoryPath + File.separator + snapshotName;

            String convertResult = convertBaseFileToSnapshotFileInStorageDir(ObjectUtils.defaultIfNull(secondaryPool, primaryPool), disk, snapshotPath, directoryPath, volume, cmd.getWait());

            mergeSnapshotIntoBaseFile(vm, diskLabel, diskPath, snapshotName, volume, conn);

            validateConvertResult(convertResult, snapshotPath);
        } catch (LibvirtException e) {
            if (!e.getMessage().contains(LIBVIRT_OPERATION_NOT_SUPPORTED_MESSAGE)) {
                throw e;
            }

            logger.info("It was not possible to take live disk snapshot for volume [{}], in VM [{}], due to [{}]. We will take full snapshot of the VM"
                    + " and extract the disk instead. Consider upgrading your QEMU binary.", volume, vmName, e.getMessage());

            takeFullVmSnapshotForBinariesThatDoesNotSupportLiveDiskSnapshot(vm, snapshotName, vmName);
            primaryPool.createFolder(TemplateConstants.DEFAULT_SNAPSHOT_ROOT_DIR);
            extractDiskFromFullVmSnapshot(disk, volume, snapshotPath, snapshotName, vmName, vm);
        }

        /*
         * libvirt on RHEL6 doesn't handle resume event emitted from
         * qemu
         */
        vm = resource.getDomain(conn, vmName);
        DomainInfo.DomainState state = vm.getInfo().state;
        if (state == DomainInfo.DomainState.VIR_DOMAIN_PAUSED) {
            vm.resume();
        }

        return createSnapshotToAndUpdatePathAndSize(secondaryPool == null ? snapshotPath : relativePath, snapshotPath);
    }


    private SnapshotObjectTO takeFullVolumeSnapshotOfStoppedVm(CreateObjectCommand cmd, KVMStoragePool primaryPool, KVMStoragePool secondaryPool, String snapshotName, KVMPhysicalDisk disk, VolumeObjectTO volume) throws IOException {
        logger.debug("Taking full volume snapshot of volume [{}]. Snapshot will be copied to [{}].", volume,
                ObjectUtils.defaultIfNull(secondaryPool, primaryPool));
        Pair<String, String> fullSnapPathAndDirPath = getFullSnapshotOrCheckpointPathAndDirPathOnCorrectStorage(primaryPool, secondaryPool, snapshotName, volume, false);

        String snapshotPath = fullSnapPathAndDirPath.first();
        String directoryPath = fullSnapPathAndDirPath.second();
        String relativePath = directoryPath + File.separator + snapshotName;

        String convertResult = convertBaseFileToSnapshotFileInStorageDir(ObjectUtils.defaultIfNull(secondaryPool, primaryPool), disk, snapshotPath, directoryPath, volume, cmd.getWait());

        validateConvertResult(convertResult, snapshotPath);

        return createSnapshotToAndUpdatePathAndSize(secondaryPool == null ? snapshotPath : relativePath, snapshotPath);
    }

private CreateObjectAnswer takeClvmVolumeSnapshotOfStoppedVm(KVMPhysicalDisk disk, String snapshotName) {
|
||||
/* VM is not running, create a snapshot by ourself */
|
||||
final Script command = new Script(_manageSnapshotPath, _cmdsTimeout, logger);
|
||||
command.add(MANAGE_SNAPSTHOT_CREATE_OPTION, disk.getPath());
|
||||
command.add(NAME_OPTION, snapshotName);
|
||||
final String result = command.execute();
|
||||
if (result != null) {
|
||||
String message = String.format("Failed to manage snapshot [%s] due to: [%s].", snapshotName, result);
|
||||
logger.debug(message);
|
||||
return new CreateObjectAnswer(message);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
    /**
     * For RBD we can't use libvirt or Bash scripts to do our snapshotting.
     * libvirt also wants to store the memory contents of the Virtual Machine,
     * but that's not possible with RBD since there is no way to store the memory
     * contents in RBD.
     * <p>
     * So we rely on the Java bindings for RBD to create our snapshot.
     * <p>
     * This snapshot might not be 100% consistent due to writes still being in the
     * memory of the Virtual Machine, but if the VM runs a kernel which supports
     * barriers properly (>2.6.32) this won't be any different than pulling the power
     * cord out of a running machine.
     */
    private void takeRbdVolumeSnapshotOfStoppedVm(KVMStoragePool primaryPool, KVMPhysicalDisk disk, String snapshotName) {
        try {
            Rados r = radosConnect(primaryPool);

            final IoCTX io = r.ioCtxCreate(primaryPool.getSourceDir());
            final Rbd rbd = new Rbd(io);
            final RbdImage image = rbd.open(disk.getName());

            logger.debug("Attempting to create RBD snapshot {}@{}", disk.getName(), snapshotName);
            image.snapCreate(snapshotName);

            rbd.close(image);
            r.ioCtxDestroy(io);
        } catch (final Exception e) {
            logger.error("An RBD snapshot operation on [{}] failed. The error was: {}", disk.getName(), e.getMessage(), e);
        }
    }
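For illustration only (not part of this change): the RBD path above reduces to a plain snapCreate() call through the rados-java bindings. A minimal sketch, assuming placeholder monitor, key, pool and image names:

import com.ceph.rados.IoCTX;
import com.ceph.rados.Rados;
import com.ceph.rbd.Rbd;
import com.ceph.rbd.RbdImage;

public class RbdSnapshotSketch {
    public static void main(String[] args) throws Exception {
        Rados rados = new Rados("admin");                 // placeholder cephx user
        rados.confSet("mon_host", "10.0.0.1");            // placeholder monitor address
        rados.confSet("key", "placeholder-secret");       // placeholder auth key
        rados.connect();

        IoCTX io = rados.ioCtxCreate("cloudstack-pool");  // placeholder RBD pool name
        Rbd rbd = new Rbd(io);
        RbdImage image = rbd.open("volume-uuid");         // placeholder image name
        image.snapCreate("snap-1");                       // same call the method above performs
        rbd.close(image);
        rados.ioCtxDestroy(io);
    }
}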
    /**
     * Retrieves the label of the disk to snapshot.
     * @param disks List of the VM's disks;
     * @param diskPath Path of the disk to snapshot;
     * @param vm VM that owns the disks;
     * @return the disk label to snapshot. If the disk path is not found in the VM's XML, a CloudRuntimeException is thrown.
     * @throws org.libvirt.LibvirtException if the VM definition cannot be read from libvirt
     */
    protected String getDiskLabelToSnapshot(List<DiskDef> disks, String diskPath, Domain vm) throws LibvirtException {
        logger.debug("Searching disk label of disk with path [{}] on VM [{}].", diskPath, vm.getName());
        for (DiskDef disk : disks) {
            String diskDefPath = disk.getDiskPath();

            if (StringUtils.isEmpty(diskDefPath)) {
                continue;
            }

            if (!diskDefPath.contains(diskPath)) {
                continue;
            }
            logger.debug("Found disk label [{}] for volume with path [{}] on VM [{}].", disk.getDiskLabel(), diskPath, vm.getName());

            return disk.getDiskLabel();
        }

        throw new CloudRuntimeException(String.format("VM [%s] has no disk with path [%s]. VM's XML [%s].", vm.getName(), diskPath, vm.getXMLDesc(0)));
    }
    /**
     * Gets the fully qualified path of the snapshot or checkpoint and the directory path. If a secondary pool is provided, the path will be on the secondary pool;
     * otherwise, the path will be on the primary pool.
     * @param primaryPool Primary pool definition; the returned path will be here if no secondary pool is provided;
     * @param secondaryPool Secondary pool definition. If provided, the primary pool will be ignored and the returned path will be on the secondary pool;
     * @param snapshotName Name of the snapshot;
     * @param volume Volume that is being snapshotted;
     * @param checkpoint Whether to return a path for a snapshot or for a snapshot's checkpoint;
     * @return Fully qualified path and the directory path of the snapshot/checkpoint.
     */
    private Pair<String, String> getFullSnapshotOrCheckpointPathAndDirPathOnCorrectStorage(KVMStoragePool primaryPool, KVMStoragePool secondaryPool, String snapshotName,
            VolumeObjectTO volume, boolean checkpoint) {
        String fullSnapshotPath;
        String dirPath;

        if (secondaryPool == null) {
            fullSnapshotPath = getSnapshotOrCheckpointPathInPrimaryStorage(primaryPool.getLocalPath(), snapshotName, checkpoint);
            dirPath = checkpoint ? TemplateConstants.DEFAULT_CHECKPOINT_ROOT_DIR : TemplateConstants.DEFAULT_SNAPSHOT_ROOT_DIR;
        } else {
            Pair<String, String> fullPathAndDirectoryPath = getSnapshotOrCheckpointPathAndDirectoryPathInSecondaryStorage(secondaryPool.getLocalPath(), snapshotName,
                    volume.getAccountId(), volume.getVolumeId(), checkpoint);

            fullSnapshotPath = fullPathAndDirectoryPath.first();
            dirPath = fullPathAndDirectoryPath.second();
        }
        return new Pair<>(fullSnapshotPath, dirPath);
    }
    protected void deleteFullVmSnapshotAfterConvertingItToExternalDiskSnapshot(Domain vm, String snapshotName, VolumeObjectTO volume, String vmName) throws LibvirtException {
        logger.debug(String.format("Deleting full VM snapshot [%s] of VM [%s] as we already converted it to an external disk snapshot of the volume [%s].", snapshotName, vmName,
                volume));
@@ -1978,18 +2485,22 @@ public class KVMStorageProcessor implements StorageProcessor {

    /**
     * Creates the snapshot directory in the primary storage, if it does not exist; then, converts the base file (VM's old writing file) to the snapshot directory.
     * @param primaryPool Storage to create folder, if not exists;
     * @param baseFile Base file of VM, which will be converted;
     * @param pool Storage to create folder, if not exists;
     * @param baseFile Base file of VM, which will be converted;
     * @param snapshotPath Path to convert the base file;
     * @param snapshotFolder Folder where the snapshot will be converted to;
     * @param volume Volume being snapshot, used for logging only;
     * @param wait timeout;
     * @return null if the conversion occurs successfully or an error message that must be handled.
     */
    protected String convertBaseFileToSnapshotFileInPrimaryStorageDir(KVMStoragePool primaryPool,
            KVMPhysicalDisk baseFile, String snapshotPath, VolumeObjectTO volume, int wait) {

    protected String convertBaseFileToSnapshotFileInStorageDir(KVMStoragePool pool,
            KVMPhysicalDisk baseFile, String snapshotPath, String snapshotFolder, VolumeObjectTO volume, int wait) {
        try (KeyFile srcKey = new KeyFile(volume.getPassphrase())) {
            logger.debug(
                String.format("Trying to convert volume [%s] (%s) to snapshot [%s].", volume, baseFile, snapshotPath));
                "Trying to convert volume [{}] ({}) to snapshot [{}].", volume, baseFile, snapshotPath);

            primaryPool.createFolder(TemplateConstants.DEFAULT_SNAPSHOT_ROOT_DIR);
            pool.createFolder(snapshotFolder);
            convertTheBaseFileToSnapshot(baseFile, snapshotPath, wait, srcKey);
        } catch (QemuImgException | LibvirtException | IOException ex) {
            return String.format("Failed to convert %s snapshot of volume [%s] to [%s] due to [%s].", volume, baseFile,
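As a hedged aside (not part of the diff itself): the helper above ultimately runs an ordinary 'qemu-img convert' through the QemuImg wrapper. A minimal sketch with placeholder paths, leaving out the encryption handling the real code does via KeyFile:

import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
import org.apache.cloudstack.utils.qemu.QemuImgFile;

public class SnapshotConvertSketch {
    public static void main(String[] args) throws Exception {
        QemuImgFile src = new QemuImgFile("/mnt/primary/volume-uuid", PhysicalDiskFormat.QCOW2);      // placeholder base file
        QemuImgFile dst = new QemuImgFile("/mnt/primary/snapshots/snap-1", PhysicalDiskFormat.QCOW2); // placeholder snapshot path
        QemuImg qemu = new QemuImg(10800);  // timeout in seconds, mirroring cmd.getWait()
        qemu.convert(src, dst);             // plain copy of the base file into the snapshot directory
    }
}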
@@ -2022,14 +2533,26 @@ public class KVMStorageProcessor implements StorageProcessor {
        q.convert(srcFile, destFile, options, qemuObjects, qemuImageOpts, null, true);
    }

    protected String getSnapshotOrCheckpointPathInPrimaryStorage(String primaryStoragePath, String snapshotName, boolean checkpoint) {
        String rootDir = checkpoint ? TemplateConstants.DEFAULT_CHECKPOINT_ROOT_DIR : TemplateConstants.DEFAULT_SNAPSHOT_ROOT_DIR;
        return String.format("%s%s%s%s%s", primaryStoragePath, File.separator, rootDir, File.separator, snapshotName);
    }

    /**
     * Retrieves the path of the snapshot on primary storage snapshot's dir.
     * @param primaryStoragePath Path of the primary storage;
     * Retrieves the path of the snapshot or snapshot's checkpoint on secondary storage snapshot's dir.
     * @param secondaryStoragePath Path of the secondary storage;
     * @param snapshotName Snapshot name;
     * @return the path of the snapshot in primary storage snapshot's dir.
     * @param accountId accountId;
     * @param volumeId volumeId;
     * @param checkpoint Whether to return a path for a snapshot or a snapshot's checkpoint;
     * @return the path of the snapshot or snapshot's checkpoint in secondary storage and the snapshot's dir.
     */
    protected String getSnapshotPathInPrimaryStorage(String primaryStoragePath, String snapshotName) {
        return String.format("%s%s%s%s%s", primaryStoragePath, File.separator, TemplateConstants.DEFAULT_SNAPSHOT_ROOT_DIR, File.separator, snapshotName);
    protected Pair<String, String> getSnapshotOrCheckpointPathAndDirectoryPathInSecondaryStorage(String secondaryStoragePath, String snapshotName, long accountId, long volumeId, boolean checkpoint) {
        String rootDir = checkpoint ? TemplateConstants.DEFAULT_CHECKPOINT_ROOT_DIR : TemplateConstants.DEFAULT_SNAPSHOT_ROOT_DIR;
        String snapshotParentDirectories = String.format("%s%s%s%s%s", rootDir, File.separator, accountId, File.separator, volumeId);
        String fullSnapPath = String.format("%s%s%s%s%s", secondaryStoragePath, File.separator, snapshotParentDirectories, File.separator, snapshotName);

        return new Pair<>(fullSnapPath, snapshotParentDirectories);
    }
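To make the resulting layout concrete (illustration only, with placeholder values, and assuming the usual "snapshots" root directory; the checkpoint root used when checkpoint is true is analogous), the two helpers above build paths like:

import java.io.File;

public class SnapshotPathSketch {
    public static void main(String[] args) {
        String sep = File.separator;
        // Primary storage: <primaryStoragePath>/<rootDir>/<snapshotName>
        String onPrimary = String.format("%s%s%s%s%s", "/mnt/primary", sep, "snapshots", sep, "snap-1");
        // Secondary storage: <secondaryStoragePath>/<rootDir>/<accountId>/<volumeId>/<snapshotName>
        String parentDirs = String.format("%s%s%s%s%s", "snapshots", sep, 2, sep, 42);
        String onSecondary = String.format("%s%s%s%s%s", "/mnt/secondary", sep, parentDirs, sep, "snap-1");
        System.out.println(onPrimary);    // /mnt/primary/snapshots/snap-1
        System.out.println(onSecondary);  // /mnt/secondary/snapshots/2/42/snap-1
    }
}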
    /**
@@ -2276,6 +2799,7 @@ public class KVMStorageProcessor implements StorageProcessor {
        KVMPhysicalDisk disk = storagePoolMgr.copyPhysicalDisk(snapshotDisk, path != null ? path : volUuid, primaryPool, cmd.getWaitInMillSeconds());

        storagePoolMgr.disconnectPhysicalDisk(pool.getPoolType(), pool.getUuid(), path);
        secondaryPool.delete();
        return disk;
    }

@@ -2378,6 +2902,9 @@ public class KVMStorageProcessor implements StorageProcessor {
                logger.info(String.format("Deleting snapshot (id=%s, name=%s, path=%s, storage type=%s) on primary storage", snapshotTO.getId(), snapshotTO.getName(),
                        snapshotTO.getPath(), primaryPool.getType()));
                deleteSnapshotFile(snapshotTO);
                if (snapshotTO.isKvmIncrementalSnapshot()) {
                    deleteCheckpoint(snapshotTO);
                }
            } else {
                logger.warn("Operation not implemented for storage pool type of " + primaryPool.getType().toString());
                throw new InternalErrorException("Operation not implemented for storage pool type of " + primaryPool.getType().toString());

@@ -2399,6 +2926,24 @@ public class KVMStorageProcessor implements StorageProcessor {
        }
    }

    /**
     * Deletes the checkpoint dump if it exists, and deletes the checkpoint definition on the VM if it is running.
     */
    protected void deleteCheckpoint(SnapshotObjectTO snapshotTO) throws IOException {
        if (snapshotTO.getCheckpointPath() != null) {
            Files.deleteIfExists(Path.of(snapshotTO.getCheckpointPath()));
        }

        String vmName = snapshotTO.getVmName();
        if (vmName == null) {
            return;
        }
        String checkpointName = snapshotTO.getPath().substring(snapshotTO.getPath().lastIndexOf(File.separator) + 1);

        logger.debug("Deleting checkpoint [{}] of VM [{}].", checkpointName, vmName);
        Script.runSimpleBashScript(String.format(LibvirtComputingResource.CHECKPOINT_DELETE_COMMAND, vmName, checkpointName));
    }
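The checkpoint definition itself is removed through virsh; the exact format string behind LibvirtComputingResource.CHECKPOINT_DELETE_COMMAND is not shown in this diff, so the command template in the sketch below is only an assumption, with placeholder VM and checkpoint names:

import com.cloud.utils.script.Script;

public class CheckpointDeleteSketch {
    public static void main(String[] args) {
        // Assumed template; the real constant may differ.
        String checkpointDeleteCommand = "virsh checkpoint-delete %s %s";
        Script.runSimpleBashScript(String.format(checkpointDeleteCommand, "i-2-10-VM", "snap-1"));
    }
}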
    /**
     * Deletes the snapshot's file.
     * @throws CloudRuntimeException if the snapshot file cannot be deleted.
@@ -34,6 +34,7 @@ import java.util.stream.Collectors;

import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.utils.cryptsetup.KeyFile;
import org.apache.cloudstack.utils.qemu.QemuImageOptions;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
import org.apache.cloudstack.utils.qemu.QemuImgException;

@@ -1608,13 +1609,13 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
            } else {
                destFile = new QemuImgFile(destPath, destFormat);
                try {
                    qemu.convert(srcFile, destFile);
                    qemu.convert(srcFile, destFile, null, null, new QemuImageOptions(srcFile.getFormat(), srcFile.getFileName(), null), null, false, true);
                    Map<String, String> destInfo = qemu.info(destFile);
                    Long virtualSize = Long.parseLong(destInfo.get(QemuImg.VIRTUAL_SIZE));
                    newDisk.setVirtualSize(virtualSize);
                    newDisk.setSize(virtualSize);
                } catch (QemuImgException | LibvirtException e) {
                    logger.error("Failed to convert " + srcFile.getFileName() + " to " + destFile.getFileName() + " the error was: " + e.getMessage());
                } catch (QemuImgException e) {
                    logger.error("Failed to convert [{}] to [{}] due to: [{}].", srcFile.getFileName(), destFile.getFileName(), e.getMessage(), e);
                    newDisk = null;
                }
            }
@@ -28,6 +28,7 @@ import java.util.Map;
import java.util.UUID;

import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient;
import org.apache.cloudstack.storage.datastore.manager.ScaleIOSDCManager;
import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil;
import org.apache.cloudstack.utils.cryptsetup.CryptSetup;
import org.apache.cloudstack.utils.cryptsetup.CryptSetupException;

@@ -148,12 +149,37 @@ public class ScaleIOStorageAdaptor implements StorageAdaptor {
    @Override
    public KVMStoragePool createStoragePool(String uuid, String host, int port, String path, String userInfo, Storage.StoragePoolType type, Map<String, String> details, boolean isPrimaryStorage) {
        ScaleIOStoragePool storagePool = new ScaleIOStoragePool(uuid, host, port, path, type, details, this);
        if (details != null && details.containsKey(ScaleIOSDCManager.ConnectOnDemand.key())) {
            String connectOnDemand = details.get(ScaleIOSDCManager.ConnectOnDemand.key());
            if (connectOnDemand != null && !Boolean.parseBoolean(connectOnDemand)) {
                Ternary<Boolean, Map<String, String>, String> prepareStorageClientStatus = prepareStorageClient(uuid, details);
                if (prepareStorageClientStatus.first()) {
                    details.putAll(prepareStorageClientStatus.second());
                }
            }
        }
        MapStorageUuidToStoragePool.put(uuid, storagePool);
        return storagePool;
    }

    @Override
    public boolean deleteStoragePool(String uuid) {
        ScaleIOStoragePool storagePool = (ScaleIOStoragePool) MapStorageUuidToStoragePool.get(uuid);
        if (storagePool != null) {
            unprepareStorageClient(uuid, storagePool.getDetails());
        }
        return MapStorageUuidToStoragePool.remove(uuid) != null;
    }

    @Override
    public boolean deleteStoragePool(String uuid, Map<String, String> details) {
        if (details != null && details.containsKey(ScaleIOSDCManager.ConnectOnDemand.key())) {
            String connectOnDemand = details.get(ScaleIOSDCManager.ConnectOnDemand.key());
            if (connectOnDemand != null && !Boolean.parseBoolean(connectOnDemand)) {
                Pair<Boolean, String> unprepareStorageClientStatus = unprepareStorageClient(uuid, details);
                return MapStorageUuidToStoragePool.remove(uuid) != null && unprepareStorageClientStatus.first();
            }
        }
        return MapStorageUuidToStoragePool.remove(uuid) != null;
    }

@@ -567,7 +593,7 @@ public class ScaleIOStorageAdaptor implements StorageAdaptor {
        qemu.resize(options, objects, usableSizeBytes);
    }

    public Ternary<Boolean, Map<String, String>, String> prepareStorageClient(Storage.StoragePoolType type, String uuid, Map<String, String> details) {
    public Ternary<Boolean, Map<String, String>, String> prepareStorageClient(String uuid, Map<String, String> details) {
        if (!ScaleIOUtil.isSDCServiceInstalled()) {
            logger.debug("SDC service not installed on host, preparing the SDC client not possible");
            return new Ternary<>(false, null, "SDC service not installed on host");

@@ -584,14 +610,28 @@ public class ScaleIOStorageAdaptor implements StorageAdaptor {
            if (!ScaleIOUtil.startSDCService()) {
                return new Ternary<>(false, null, "Couldn't start SDC service on host");
            }
        } else if (!ScaleIOUtil.restartSDCService()) {
            return new Ternary<>(false, null, "Couldn't restart SDC service on host");
        }

        if (details != null && details.containsKey(ScaleIOGatewayClient.STORAGE_POOL_MDMS)) {
            // Assuming SDC service is started, add mdms
            String mdms = details.get(ScaleIOGatewayClient.STORAGE_POOL_MDMS);
            String[] mdmAddresses = mdms.split(",");
            if (mdmAddresses.length > 0) {
                if (ScaleIOUtil.mdmAdded(mdmAddresses[0])) {
                    return new Ternary<>(true, getSDCDetails(details), "MDM added, no need to prepare the SDC client");
                }

                ScaleIOUtil.addMdms(Arrays.asList(mdmAddresses));
                if (!ScaleIOUtil.mdmAdded(mdmAddresses[0])) {
                    return new Ternary<>(false, null, "Failed to add MDMs");
                }
            }
        }

        return new Ternary<>(true, getSDCDetails(details), "Prepared client successfully");
    }

    public Pair<Boolean, String> unprepareStorageClient(Storage.StoragePoolType type, String uuid) {
    public Pair<Boolean, String> unprepareStorageClient(String uuid, Map<String, String> details) {
        if (!ScaleIOUtil.isSDCServiceInstalled()) {
            logger.debug("SDC service not installed on host, no need to unprepare the SDC client");
            return new Pair<>(true, "SDC service not installed on host, no need to unprepare the SDC client");

@@ -602,8 +642,19 @@ public class ScaleIOStorageAdaptor implements StorageAdaptor {
            return new Pair<>(true, "SDC service not enabled on host, no need to unprepare the SDC client");
        }

        if (!ScaleIOUtil.stopSDCService()) {
            return new Pair<>(false, "Couldn't stop SDC service on host");
        if (details != null && details.containsKey(ScaleIOGatewayClient.STORAGE_POOL_MDMS)) {
            String mdms = details.get(ScaleIOGatewayClient.STORAGE_POOL_MDMS);
            String[] mdmAddresses = mdms.split(",");
            if (mdmAddresses.length > 0) {
                if (!ScaleIOUtil.mdmAdded(mdmAddresses[0])) {
                    return new Pair<>(true, "MDM not added, no need to unprepare the SDC client");
                }

                ScaleIOUtil.removeMdms(Arrays.asList(mdmAddresses));
                if (ScaleIOUtil.mdmAdded(mdmAddresses[0])) {
                    return new Pair<>(false, "Failed to remove MDMs, unable to unprepare the SDC client");
                }
            }
        }

        return new Pair<>(true, "Unprepared SDC client successfully");
@@ -44,6 +44,10 @@ public interface StorageAdaptor {

    public boolean deleteStoragePool(String uuid);

    public default boolean deleteStoragePool(String uuid, Map<String, String> details) {
        return true;
    }

    public default KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool,
            PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, Long usableSize, byte[] passphrase) {
        return createPhysicalDisk(name, pool, format, provisioningType, size, passphrase);

@@ -127,22 +131,21 @@ public interface StorageAdaptor {

    /**
     * Prepares the storage client.
     * @param type type of the storage pool
     * @param uuid uuid of the storage pool
     * @param details any details of the storage pool that are required for client preparation
     * @return status, client details, & message in case failed
     */
    default Ternary<Boolean, Map<String, String>, String> prepareStorageClient(StoragePoolType type, String uuid, Map<String, String> details) {
    default Ternary<Boolean, Map<String, String>, String> prepareStorageClient(String uuid, Map<String, String> details) {
        return new Ternary<>(true, new HashMap<>(), "");
    }

    /**
     * Unprepares the storage client.
     * @param type type of the storage pool
     * @param uuid uuid of the storage pool
     * @param details any details of the storage pool that are required for client unpreparation
     * @return status, & message in case failed
     */
    default Pair<Boolean, String> unprepareStorageClient(StoragePoolType type, String uuid) {
    default Pair<Boolean, String> unprepareStorageClient(String uuid, Map<String, String> details) {
        return new Pair<>(true, "");
    }
}
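As a hedged illustration of the reworked, details-driven interface (not taken from the change itself; the detail key and addresses are placeholders, and the sketch assumes it lives alongside StorageAdaptor in the same package):

import java.util.HashMap;
import java.util.Map;

import com.cloud.utils.Ternary;

public class PrepareStorageClientSketch {
    static void prepareClient(StorageAdaptor adaptor, String poolUuid) {
        Map<String, String> details = new HashMap<>();
        details.put("mdms", "1.1.1.1,2.2.2.2");  // placeholder key; the real key is ScaleIOGatewayClient.STORAGE_POOL_MDMS
        Ternary<Boolean, Map<String, String>, String> status = adaptor.prepareStorageClient(poolUuid, details);
        if (status.first()) {
            details.putAll(status.second());     // merge SDC details reported back by the adaptor
        } else {
            System.err.println("Client preparation failed: " + status.third());
        }
    }
}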
@@ -51,6 +51,7 @@ public class QemuImg {
    public static final String ENCRYPT_KEY_SECRET = "encrypt.key-secret";
    public static final String TARGET_ZERO_FLAG = "--target-is-zero";
    public static final long QEMU_2_10 = 2010000;
    public static final long QEMU_5_10 = 5010000;

    /* The qemu-img binary. We expect this to be in $PATH */
    public String _qemuImgPath = "qemu-img";

@@ -109,6 +110,10 @@ public class QemuImg {
        }
    }

    public enum BitmapOperation {
        Add, Remove, Clear, Enable, Disable, Merge
    }

    /**
     * Create a QemuImg object that supports skipping target zeroes
     * We detect this support via qemu-img help since support can

@@ -381,6 +386,37 @@ public class QemuImg {
     */
    public void convert(final QemuImgFile srcFile, final QemuImgFile destFile,
            final Map<String, String> options, final List<QemuObject> qemuObjects, final QemuImageOptions srcImageOpts, final String snapshotName, final boolean forceSourceFormat) throws QemuImgException {
        convert(srcFile, destFile, options, qemuObjects, srcImageOpts, snapshotName, forceSourceFormat, false);
    }

    /**
     * Converts an image from source to destination.
     *
     * This method is a facade for 'qemu-img convert' and converts a disk image or snapshot into a disk image with the specified filename and format.
     *
     * @param srcFile
     *            The source file.
     * @param destFile
     *            The destination file.
     * @param options
     *            Options for the conversion. Takes a Map<String, String> with key value
     *            pairs which are passed on to qemu-img without validation.
     * @param qemuObjects
     *            Pass qemu Objects to create - see objects in the qemu man page.
     * @param srcImageOpts
     *            pass qemu --image-opts to convert.
     * @param snapshotName
     *            If it is provided, conversion uses it as parameter.
     * @param forceSourceFormat
     *            If true, specifies the source format in the conversion command.
     * @param keepBitmaps
     *            If true, copies the bitmaps to the destination image.
     * @return void
     */
    public void convert(final QemuImgFile srcFile, final QemuImgFile destFile,
            final Map<String, String> options, final List<QemuObject> qemuObjects, final QemuImageOptions srcImageOpts, final String snapshotName, final boolean forceSourceFormat,
            boolean keepBitmaps) throws QemuImgException {

        Script script = new Script(_qemuImgPath, timeout);
        if (StringUtils.isNotBlank(snapshotName)) {
            String qemuPath = Script.runSimpleBashScript(getQemuImgPathScript);

@@ -430,6 +466,10 @@ public class QemuImg {
            script.add(srcFile.getFileName());
        }

        if (this.version >= QEMU_5_10 && keepBitmaps) {
            script.add("--bitmaps");
        }

        script.add(destFile.getFileName());

        final String result = script.execute();
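A hedged usage sketch of the new keepBitmaps flag (illustration only; paths are placeholders): when qemu-img is new enough, per the QEMU_5_10 guard above, the wrapper appends --bitmaps so persistent dirty bitmaps survive the copy.

import org.apache.cloudstack.utils.qemu.QemuImageOptions;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
import org.apache.cloudstack.utils.qemu.QemuImgFile;

public class ConvertKeepBitmapsSketch {
    public static void main(String[] args) throws Exception {
        QemuImgFile src = new QemuImgFile("/mnt/primary/volume-uuid", PhysicalDiskFormat.QCOW2);             // placeholder
        QemuImgFile dst = new QemuImgFile("/mnt/secondary/snapshots/2/42/snap-1", PhysicalDiskFormat.QCOW2); // placeholder
        QemuImageOptions srcOpts = new QemuImageOptions(src.getFormat(), src.getFileName(), null);           // same pattern as in LibvirtStorageAdaptor above
        new QemuImg(10800).convert(src, dst, null, null, srcOpts, null, false, true);                        // keepBitmaps = true
    }
}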
@@ -859,4 +899,39 @@ public class QemuImg {

        return result;
    }

    /**
     * Perform one or more modifications of the persistent bitmap in {@code srcFile}
     * <br>
     * This method is a facade for 'qemu-img bitmap'.
     * <br>
     * Currently only {@link BitmapOperation#Remove} is implemented
     *
     * @param bitmapOperation
     *            The operation to be performed
     * @param srcfile
     *            The source file on which the operation will be performed
     * @param bitmapName
     *            The name of the bitmap
     */
    public void bitmap(BitmapOperation bitmapOperation, QemuImgFile srcfile, String bitmapName) throws QemuImgException {
        if (bitmapOperation != BitmapOperation.Remove) {
            throw new QemuImgException("Operation not implemented.");
        }
        removeBitmap(srcfile, bitmapName);
    }

    private void removeBitmap(QemuImgFile srcFile, String bitmapName) throws QemuImgException {
        final Script script = new Script(_qemuImgPath);
        script.add("bitmap");
        script.add("--remove");
        script.add(srcFile.getFileName());
        script.add(bitmapName);

        String result = script.execute();
        if (result != null) {
            throw new QemuImgException(String.format("Exception while removing bitmap [%s] from file [%s]. Result is [%s].", bitmapName, srcFile.getFileName(), result));
        }
    }
}
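For completeness, a hedged usage sketch of the bitmap facade (illustration only; file path and bitmap name are placeholders; any operation other than Remove currently throws):

import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImg.BitmapOperation;
import org.apache.cloudstack.utils.qemu.QemuImgFile;

public class RemoveBitmapSketch {
    public static void main(String[] args) throws Exception {
        QemuImgFile disk = new QemuImgFile("/mnt/primary/volume-uuid");  // placeholder qcow2 file
        new QemuImg(3600).bitmap(BitmapOperation.Remove, disk, "checkpoint-bitmap");  // runs: qemu-img bitmap --remove <file> <bitmap>
    }
}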
@ -25,6 +25,7 @@ import static org.junit.Assert.assertNotEquals;
|
|||
import static org.junit.Assert.assertNotNull;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.junit.Assert.fail;
|
||||
import static org.mockito.ArgumentMatchers.any;
|
||||
import static org.mockito.ArgumentMatchers.nullable;
|
||||
import static org.mockito.Mockito.doNothing;
|
||||
import static org.mockito.Mockito.doReturn;
|
||||
|
|
@ -241,6 +242,13 @@ public class LibvirtComputingResourceTest {
|
|||
@Mock
|
||||
LibvirtDomainXMLParser parserMock;
|
||||
|
||||
@Mock
|
||||
DiskTO diskToMock;
|
||||
|
||||
@Mock
|
||||
private VolumeObjectTO volumeObjectToMock;
|
||||
|
||||
|
||||
@Spy
|
||||
private LibvirtComputingResource libvirtComputingResourceSpy = Mockito.spy(new LibvirtComputingResource());
|
||||
|
||||
|
|
@ -5254,8 +5262,8 @@ public class LibvirtComputingResourceTest {
|
|||
try (MockedStatic<SshHelper> sshHelperMockedStatic = Mockito.mockStatic(SshHelper.class)) {
|
||||
sshHelperMockedStatic.when(() -> SshHelper.scpTo(
|
||||
Mockito.anyString(), Mockito.anyInt(),
|
||||
Mockito.anyString(), Mockito.any(File.class), nullable(String.class), Mockito.anyString(),
|
||||
Mockito.any(String[].class), Mockito.anyString())).thenAnswer(invocation -> null);
|
||||
Mockito.anyString(), any(File.class), nullable(String.class), Mockito.anyString(),
|
||||
any(String[].class), Mockito.anyString())).thenAnswer(invocation -> null);
|
||||
|
||||
final LibvirtRequestWrapper wrapper = LibvirtRequestWrapper.getInstance();
|
||||
assertNotNull(wrapper);
|
||||
|
|
@ -5334,8 +5342,8 @@ public class LibvirtComputingResourceTest {
|
|||
try (MockedStatic<SshHelper> sshHelperMockedStatic = Mockito.mockStatic(SshHelper.class)) {
|
||||
sshHelperMockedStatic.when(() -> SshHelper.scpTo(
|
||||
Mockito.anyString(), Mockito.anyInt(),
|
||||
Mockito.anyString(), Mockito.any(File.class), nullable(String.class), Mockito.anyString(),
|
||||
Mockito.any(String[].class), Mockito.anyString())).thenAnswer(invocation -> null);
|
||||
Mockito.anyString(), any(File.class), nullable(String.class), Mockito.anyString(),
|
||||
any(String[].class), Mockito.anyString())).thenAnswer(invocation -> null);
|
||||
final LibvirtRequestWrapper wrapper = LibvirtRequestWrapper.getInstance();
|
||||
assertNotNull(wrapper);
|
||||
|
||||
|
|
@ -5373,8 +5381,12 @@ public class LibvirtComputingResourceTest {
|
|||
when(vmSpec.getNics()).thenReturn(nics);
|
||||
when(vmSpec.getType()).thenReturn(VirtualMachine.Type.User);
|
||||
when(vmSpec.getName()).thenReturn(vmName);
|
||||
when(vmSpec.getDisks()).thenReturn(new DiskTO[]{diskToMock});
|
||||
when(diskToMock.getData()).thenReturn(new VolumeObjectTO());
|
||||
when(libvirtComputingResourceMock.createVMFromSpec(vmSpec)).thenReturn(vmDef);
|
||||
|
||||
when(libvirtComputingResourceMock.recreateCheckpointsOnVm(any(), any(), any())).thenReturn(true);
|
||||
|
||||
when(libvirtComputingResourceMock.getLibvirtUtilitiesHelper()).thenReturn(libvirtUtilitiesHelper);
|
||||
try {
|
||||
when(libvirtUtilitiesHelper.getConnectionByType(vmDef.getHvsType())).thenReturn(conn);
|
||||
|
|
@ -5585,7 +5597,7 @@ public class LibvirtComputingResourceTest {
|
|||
public void testAddExtraConfigComponentEmptyExtraConfig() {
|
||||
libvirtComputingResourceMock = new LibvirtComputingResource();
|
||||
libvirtComputingResourceMock.addExtraConfigComponent(new HashMap<>(), vmDef);
|
||||
Mockito.verify(vmDef, never()).addComp(Mockito.any());
|
||||
Mockito.verify(vmDef, never()).addComp(any());
|
||||
}
|
||||
|
||||
@Test
|
||||
|
|
@ -5596,7 +5608,7 @@ public class LibvirtComputingResourceTest {
|
|||
extraConfig.put("extraconfig-2", "value2");
|
||||
extraConfig.put("extraconfig-3", "value3");
|
||||
libvirtComputingResourceMock.addExtraConfigComponent(extraConfig, vmDef);
|
||||
Mockito.verify(vmDef, times(1)).addComp(Mockito.any());
|
||||
Mockito.verify(vmDef, times(1)).addComp(any());
|
||||
}
|
||||
|
||||
public void validateGetCurrentMemAccordingToMemBallooningWithoutMemBalooning(){
|
||||
|
|
@ -5815,7 +5827,7 @@ public class LibvirtComputingResourceTest {
|
|||
|
||||
try (MockedStatic<AgentPropertiesFileHandler> ignored = Mockito.mockStatic(AgentPropertiesFileHandler.class);
|
||||
MockedStatic<NetUtils> netUtilsMockedStatic = Mockito.mockStatic(NetUtils.class)) {
|
||||
Mockito.when(AgentPropertiesFileHandler.getPropertyValue(Mockito.any())).thenReturn("cloudbr15",
|
||||
Mockito.when(AgentPropertiesFileHandler.getPropertyValue(any())).thenReturn("cloudbr15",
|
||||
"cloudbr28");
|
||||
|
||||
Mockito.when(NetUtils.getNetworkInterface(Mockito.anyString())).thenReturn(networkInterfaceMock1,
|
||||
|
|
@ -5890,7 +5902,7 @@ public class LibvirtComputingResourceTest {
|
|||
(mock, context) -> {
|
||||
doNothing().when(mock).add(Mockito.anyString());
|
||||
when(mock.execute()).thenReturn(null);
|
||||
when(mock.execute(Mockito.any())).thenReturn(null);
|
||||
when(mock.execute(any())).thenReturn(null);
|
||||
});
|
||||
MockedConstruction<OneLineParser> ignored = Mockito.mockConstruction(OneLineParser.class, (mock, context) -> {
|
||||
when(mock.getLine()).thenReturn("result");
|
||||
|
|
@ -5946,7 +5958,7 @@ public class LibvirtComputingResourceTest {
|
|||
(mock, context) -> {
|
||||
doNothing().when(mock).add(Mockito.anyString());
|
||||
when(mock.execute()).thenReturn(null);
|
||||
when(mock.execute(Mockito.any())).thenReturn(null);
|
||||
when(mock.execute(any())).thenReturn(null);
|
||||
}); MockedConstruction<OneLineParser> ignored2 = Mockito.mockConstruction(OneLineParser.class,
|
||||
(mock, context) -> {when(mock.getLine()).thenReturn("result");})) {
|
||||
|
||||
|
|
@ -5968,7 +5980,7 @@ public class LibvirtComputingResourceTest {
|
|||
(mock, context) -> {
|
||||
doNothing().when(mock).add(Mockito.anyString());
|
||||
when(mock.execute()).thenReturn(null);
|
||||
when(mock.execute(Mockito.any())).thenReturn(null);
|
||||
when(mock.execute(any())).thenReturn(null);
|
||||
}); MockedConstruction<OneLineParser> ignored2 = Mockito.mockConstruction(OneLineParser.class,
|
||||
(mock, context) -> {when(mock.getLine()).thenReturn("result");})) {
|
||||
|
||||
|
|
@ -5988,7 +6000,7 @@ public class LibvirtComputingResourceTest {
|
|||
|
||||
List<Integer> result = libvirtComputingResourceSpy.getVmsToSetMemoryBalloonStatsPeriod(connMock);
|
||||
|
||||
Mockito.verify(loggerMock).error(Mockito.anyString(), (Throwable) Mockito.any());
|
||||
Mockito.verify(loggerMock).error(Mockito.anyString(), (Throwable) any());
|
||||
Assert.assertTrue(result.isEmpty());
|
||||
}
|
||||
|
||||
|
|
@ -6078,7 +6090,7 @@ public class LibvirtComputingResourceTest {
|
|||
Mockito.when(parserMock.parseDomainXML(Mockito.anyString())).thenReturn(true);
|
||||
Mockito.when(parserMock.getMemBalloon()).thenReturn(memBalloonDef);
|
||||
try (MockedStatic<Script> ignored = Mockito.mockStatic(Script.class)) {
|
||||
Mockito.when(Script.runSimpleBashScript(Mockito.any())).thenReturn(null);
|
||||
Mockito.when(Script.runSimpleBashScript(any())).thenReturn(null);
|
||||
|
||||
libvirtComputingResourceSpy.setupMemoryBalloonStatsPeriod(connMock);
|
||||
|
||||
|
|
@ -6569,4 +6581,43 @@ public class LibvirtComputingResourceTest {
|
|||
assertEquals(LibvirtVMDef.TpmDef.TpmModel.CRB, tpmDef.getModel());
|
||||
assertEquals(LibvirtVMDef.TpmDef.TpmVersion.V2_0, tpmDef.getVersion());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void recreateCheckpointsOnVmTestVersionIsNotSufficient() {
|
||||
Mockito.doThrow(new CloudRuntimeException("")).when(libvirtComputingResourceSpy).validateLibvirtAndQemuVersionForIncrementalSnapshots();
|
||||
|
||||
boolean result = libvirtComputingResourceSpy.recreateCheckpointsOnVm(List.of(volumeObjectToMock), null, null);
|
||||
|
||||
Assert.assertFalse(result);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void recreateCheckpointsOnVmTestVolumesDoNotHaveCheckpoints() {
|
||||
Mockito.doNothing().when(libvirtComputingResourceSpy).validateLibvirtAndQemuVersionForIncrementalSnapshots();
|
||||
|
||||
Mockito.doReturn(null).when(libvirtComputingResourceSpy).getDisks(Mockito.any(), Mockito.any());
|
||||
Mockito.doReturn(null).when(libvirtComputingResourceSpy).mapVolumeToDiskDef(Mockito.any(), Mockito.any());
|
||||
|
||||
boolean result = libvirtComputingResourceSpy.recreateCheckpointsOnVm(List.of(volumeObjectToMock), null, null);
|
||||
|
||||
Mockito.verify(libvirtComputingResourceSpy, Mockito.never()).recreateCheckpointsOfDisk(Mockito.any(), Mockito.any(), Mockito.any());
|
||||
Assert.assertTrue(result);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void recreateCheckpointsOnVmTestVolumesHaveCheckpoints() {
|
||||
Mockito.doNothing().when(libvirtComputingResourceSpy).validateLibvirtAndQemuVersionForIncrementalSnapshots();
|
||||
|
||||
Mockito.doReturn(null).when(libvirtComputingResourceSpy).getDisks(Mockito.any(), Mockito.any());
|
||||
Mockito.doReturn(null).when(libvirtComputingResourceSpy).mapVolumeToDiskDef(Mockito.any(), Mockito.any());
|
||||
|
||||
Mockito.doReturn(List.of("path")).when(volumeObjectToMock).getCheckpointPaths();
|
||||
|
||||
Mockito.doNothing().when(libvirtComputingResourceSpy).recreateCheckpointsOfDisk(Mockito.any(), Mockito.any(), Mockito.any());
|
||||
|
||||
boolean result = libvirtComputingResourceSpy.recreateCheckpointsOnVm(List.of(volumeObjectToMock), null, null);
|
||||
|
||||
Mockito.verify(libvirtComputingResourceSpy, Mockito.times(1)).recreateCheckpointsOfDisk(Mockito.any(), Mockito.any(), Mockito.any());
|
||||
Assert.assertTrue(result);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -66,6 +66,7 @@ public class LibvirtGetVmIpAddressCommandWrapperTest {
|
|||
|
||||
when(getVmIpAddressCommand.getVmName()).thenReturn("validVmName");
|
||||
when(getVmIpAddressCommand.getVmNetworkCidr()).thenReturn("192.168.0.0/24");
|
||||
when(getVmIpAddressCommand.getMacAddress()).thenReturn("02:0c:02:f9:00:80");
|
||||
when(getVmIpAddressCommand.isWindows()).thenReturn(false);
|
||||
when(Script.executePipedCommands(anyList(), anyLong())).thenReturn(new Pair<>(0, VIRSH_DOMIF_OUTPUT));
|
||||
|
||||
|
|
@ -88,6 +89,7 @@ public class LibvirtGetVmIpAddressCommandWrapperTest {
|
|||
|
||||
when(getVmIpAddressCommand.getVmName()).thenReturn("invalidVmName!");
|
||||
when(getVmIpAddressCommand.getVmNetworkCidr()).thenReturn("192.168.0.0/24");
|
||||
when(getVmIpAddressCommand.getMacAddress()).thenReturn("02:0c:02:f9:00:80");
|
||||
when(getVmIpAddressCommand.isWindows()).thenReturn(false);
|
||||
when(Script.executePipedCommands(anyList(), anyLong())).thenReturn(new Pair<>(0, VIRSH_DOMIF_OUTPUT));
|
||||
|
||||
|
|
@ -114,6 +116,7 @@ public class LibvirtGetVmIpAddressCommandWrapperTest {
|
|||
|
||||
when(getVmIpAddressCommand.getVmName()).thenReturn("validVmName");
|
||||
when(getVmIpAddressCommand.getVmNetworkCidr()).thenReturn("192.168.0.0/24");
|
||||
when(getVmIpAddressCommand.getMacAddress()).thenReturn("02:0c:02:f9:00:80");
|
||||
when(getVmIpAddressCommand.isWindows()).thenReturn(true);
|
||||
when(Script.executePipedCommands(anyList(), anyLong())).thenReturn(new Pair<>(0, "192.168.0.10"));
|
||||
|
||||
|
|
|
|||
|
|
@ -18,17 +18,18 @@
|
|||
package com.cloud.hypervisor.kvm.resource.wrapper;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.nio.file.CopyOption;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.Paths;
|
||||
|
||||
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
|
||||
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
|
||||
import org.apache.cloudstack.storage.to.VolumeObjectTO;
|
||||
import org.apache.cloudstack.utils.qemu.QemuImgException;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.libvirt.LibvirtException;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.MockedStatic;
|
||||
import org.mockito.Mockito;
|
||||
|
|
@ -70,6 +71,9 @@ public class LibvirtRevertSnapshotCommandWrapperTest {
|
|||
@Mock
|
||||
VolumeObjectTO volumeObjectToMock;
|
||||
|
||||
@Mock
|
||||
LibvirtComputingResource resourceMock;
|
||||
|
||||
@Test
|
||||
public void validateGetFullPathAccordingToStorage() {
|
||||
String snapshotPath = "snapshotPath";
|
||||
|
|
@ -82,25 +86,6 @@ public class LibvirtRevertSnapshotCommandWrapperTest {
|
|||
Assert.assertEquals(expectedResult, result);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void validateReplaceVolumeWithSnapshotReplaceFiles() throws IOException {
|
||||
try (MockedStatic<Files> ignored = Mockito.mockStatic(Files.class)) {
|
||||
Mockito.when(Files.copy(Mockito.any(Path.class), Mockito.any(Path.class), Mockito.any(CopyOption.class)))
|
||||
.thenReturn(pathMock);
|
||||
libvirtRevertSnapshotCommandWrapperSpy.replaceVolumeWithSnapshot("test", "test");
|
||||
}
|
||||
}
|
||||
|
||||
@Test (expected = IOException.class)
|
||||
public void validateReplaceVolumeWithSnapshotThrowsIOException() throws IOException {
|
||||
try (MockedStatic<Files> ignored = Mockito.mockStatic(Files.class)) {
|
||||
Mockito.when(
|
||||
Files.copy(Mockito.any(Path.class), Mockito.any(Path.class), Mockito.any(CopyOption.class)))
|
||||
.thenThrow(IOException.class);
|
||||
libvirtRevertSnapshotCommandWrapperSpy.replaceVolumeWithSnapshot("test", "test");
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void validateGetSnapshotPathExistsOnPrimaryStorage() {
|
||||
String snapshotPath = "test";
|
||||
|
|
@ -159,20 +144,29 @@ public class LibvirtRevertSnapshotCommandWrapperTest {
|
|||
}
|
||||
|
||||
@Test
|
||||
public void validateRevertVolumeToSnapshotReplaceSuccessfully() throws IOException {
|
||||
public void validateRevertVolumeToSnapshotReplaceSuccessfully() throws LibvirtException, QemuImgException {
|
||||
Mockito.doReturn(volumeObjectToMock).when(snapshotObjectToSecondaryMock).getVolume();
|
||||
Mockito.doReturn(pairStringSnapshotObjectToMock).when(libvirtRevertSnapshotCommandWrapperSpy).getSnapshot(Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any());
|
||||
Mockito.doNothing().when(libvirtRevertSnapshotCommandWrapperSpy).replaceVolumeWithSnapshot(Mockito.any(), Mockito.any());
|
||||
libvirtRevertSnapshotCommandWrapperSpy.revertVolumeToSnapshot(snapshotObjectToPrimaryMock, snapshotObjectToSecondaryMock, dataStoreToMock, kvmStoragePoolPrimaryMock,
|
||||
kvmStoragePoolSecondaryMock);
|
||||
libvirtRevertSnapshotCommandWrapperSpy.revertVolumeToSnapshot(kvmStoragePoolSecondaryMock, snapshotObjectToPrimaryMock, snapshotObjectToSecondaryMock, kvmStoragePoolPrimaryMock, resourceMock
|
||||
);
|
||||
}
|
||||
|
||||
@Test (expected = CloudRuntimeException.class)
|
||||
public void validateRevertVolumeToSnapshotReplaceVolumeThrowsIOException() throws IOException {
|
||||
public void validateRevertVolumeToSnapshotReplaceVolumeThrowsQemuImgException() throws LibvirtException, QemuImgException {
|
||||
Mockito.doReturn(volumeObjectToMock).when(snapshotObjectToSecondaryMock).getVolume();
|
||||
Mockito.doReturn(pairStringSnapshotObjectToMock).when(libvirtRevertSnapshotCommandWrapperSpy).getSnapshot(Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any());
|
||||
Mockito.doThrow(IOException.class).when(libvirtRevertSnapshotCommandWrapperSpy).replaceVolumeWithSnapshot(Mockito.any(), Mockito.any());
|
||||
libvirtRevertSnapshotCommandWrapperSpy.revertVolumeToSnapshot(snapshotObjectToPrimaryMock, snapshotObjectToSecondaryMock, dataStoreToMock, kvmStoragePoolPrimaryMock,
|
||||
kvmStoragePoolSecondaryMock);
|
||||
Mockito.doThrow(QemuImgException.class).when(libvirtRevertSnapshotCommandWrapperSpy).replaceVolumeWithSnapshot(Mockito.any(), Mockito.any());
|
||||
libvirtRevertSnapshotCommandWrapperSpy.revertVolumeToSnapshot(kvmStoragePoolSecondaryMock, snapshotObjectToPrimaryMock, snapshotObjectToSecondaryMock, kvmStoragePoolPrimaryMock, resourceMock
|
||||
);
|
||||
}
|
||||
|
||||
@Test (expected = CloudRuntimeException.class)
|
||||
public void validateRevertVolumeToSnapshotReplaceVolumeThrowsLibvirtException() throws LibvirtException, QemuImgException {
|
||||
Mockito.doReturn(volumeObjectToMock).when(snapshotObjectToSecondaryMock).getVolume();
|
||||
Mockito.doReturn(pairStringSnapshotObjectToMock).when(libvirtRevertSnapshotCommandWrapperSpy).getSnapshot(Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any());
|
||||
Mockito.doThrow(LibvirtException.class).when(libvirtRevertSnapshotCommandWrapperSpy).replaceVolumeWithSnapshot(Mockito.any(), Mockito.any());
|
||||
libvirtRevertSnapshotCommandWrapperSpy.revertVolumeToSnapshot(kvmStoragePoolSecondaryMock, snapshotObjectToPrimaryMock, snapshotObjectToSecondaryMock, kvmStoragePoolPrimaryMock, resourceMock
|
||||
);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -14,6 +14,10 @@
|
|||
|
||||
package com.cloud.hypervisor.kvm.resource.wrapper;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
|
|
@ -45,10 +49,13 @@ public class LibvirtUnprepareStorageClientCommandWrapperTest {
|
|||
UnprepareStorageClientCommand cmd = Mockito.mock(UnprepareStorageClientCommand.class);
|
||||
Mockito.when(cmd.getPoolType()).thenReturn(Storage.StoragePoolType.PowerFlex);
|
||||
Mockito.when(cmd.getPoolUuid()).thenReturn(poolUuid);
|
||||
Map<String, String> details = new HashMap<>();
|
||||
details.put(ScaleIOGatewayClient.STORAGE_POOL_MDMS, "1.1.1.1,2.2.2.2");
|
||||
Mockito.when(cmd.getDetails()).thenReturn(details);
|
||||
|
||||
KVMStoragePoolManager storagePoolMgr = Mockito.mock(KVMStoragePoolManager.class);
|
||||
Mockito.when(libvirtComputingResourceMock.getStoragePoolMgr()).thenReturn(storagePoolMgr);
|
||||
Mockito.when(storagePoolMgr.unprepareStorageClient(cmd.getPoolType(), cmd.getPoolUuid())).thenReturn(new Pair<>(true, ""));
|
||||
Mockito.when(storagePoolMgr.unprepareStorageClient(cmd.getPoolType(), cmd.getPoolUuid(), cmd.getDetails())).thenReturn(new Pair<>(true, ""));
|
||||
|
||||
UnprepareStorageClientAnswer result = (UnprepareStorageClientAnswer) libvirtUnprepareStorageClientCommandWrapperSpy.execute(cmd, libvirtComputingResourceMock);
|
||||
|
||||
|
|
@ -60,10 +67,13 @@ public class LibvirtUnprepareStorageClientCommandWrapperTest {
|
|||
UnprepareStorageClientCommand cmd = Mockito.mock(UnprepareStorageClientCommand.class);
|
||||
Mockito.when(cmd.getPoolType()).thenReturn(Storage.StoragePoolType.PowerFlex);
|
||||
Mockito.when(cmd.getPoolUuid()).thenReturn(poolUuid);
|
||||
Map<String, String> details = new HashMap<>();
|
||||
details.put(ScaleIOGatewayClient.STORAGE_POOL_MDMS, "1.1.1.1,2.2.2.2");
|
||||
Mockito.when(cmd.getDetails()).thenReturn(details);
|
||||
|
||||
KVMStoragePoolManager storagePoolMgr = Mockito.mock(KVMStoragePoolManager.class);
|
||||
Mockito.when(libvirtComputingResourceMock.getStoragePoolMgr()).thenReturn(storagePoolMgr);
|
||||
Mockito.when(storagePoolMgr.unprepareStorageClient(cmd.getPoolType(), cmd.getPoolUuid())).thenReturn(new Pair<>(false, "Unprepare storage client failed"));
|
||||
Mockito.when(storagePoolMgr.unprepareStorageClient(cmd.getPoolType(), cmd.getPoolUuid(), cmd.getDetails())).thenReturn(new Pair<>(false, "Unprepare storage client failed"));
|
||||
|
||||
UnprepareStorageClientAnswer result = (UnprepareStorageClientAnswer) libvirtUnprepareStorageClientCommandWrapperSpy.execute(cmd, libvirtComputingResourceMock);
|
||||
|
||||
|
|
|
|||
|
|
@ -33,6 +33,7 @@ import org.apache.cloudstack.utils.qemu.QemuImageOptions;
|
|||
import org.apache.cloudstack.utils.qemu.QemuImg;
|
||||
import org.apache.cloudstack.utils.qemu.QemuImgException;
|
||||
import org.apache.cloudstack.utils.qemu.QemuImgFile;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.junit.After;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
|
|
@ -101,6 +102,9 @@ public class KVMStorageProcessorTest {
|
|||
@Mock
|
||||
LibvirtVMDef.DiskDef diskDefMock;
|
||||
|
||||
@Mock
|
||||
Logger loggerMock;
|
||||
|
||||
|
||||
private static final String directDownloadTemporaryPath = "/var/lib/libvirt/images/dd";
|
||||
private static final long templateSize = 80000L;
|
||||
|
|
@ -173,7 +177,7 @@ public class KVMStorageProcessorTest {
|
|||
String snapshotName = "snapshot";
|
||||
String expectedResult = String.format("%s%s%s%s%s", path, File.separator, TemplateConstants.DEFAULT_SNAPSHOT_ROOT_DIR, File.separator, snapshotName);
|
||||
|
||||
String result = storageProcessor.getSnapshotPathInPrimaryStorage(path, snapshotName);
|
||||
String result = storageProcessor.getSnapshotOrCheckpointPathInPrimaryStorage(path, snapshotName, false);
|
||||
Assert.assertEquals(expectedResult, result);
|
||||
}
|
||||
|
||||
|
|
@ -261,15 +265,11 @@ public class KVMStorageProcessorTest {
|
|||
}
|
||||
|
||||
@Test
|
||||
public void convertBaseFileToSnapshotFileInPrimaryStorageDirTestFailToConvertWithQemuImgExceptionReturnErrorMessage() throws QemuImgException {
|
||||
KVMPhysicalDisk baseFile = Mockito.mock(KVMPhysicalDisk.class);
|
||||
public void convertBaseFileToSnapshotFileInPrimaryStorageDirTestFailToConvertWithQemuImgExceptionReturnErrorMessage() {
|
||||
String errorMessage = "error";
|
||||
KVMStoragePool primaryPoolMock = Mockito.mock(KVMStoragePool.class);
|
||||
KVMPhysicalDisk baseFileMock = Mockito.mock(KVMPhysicalDisk.class);
|
||||
VolumeObjectTO volumeMock = Mockito.mock(VolumeObjectTO.class);
|
||||
QemuImgFile srcFileMock = Mockito.mock(QemuImgFile.class);
|
||||
QemuImgFile destFileMock = Mockito.mock(QemuImgFile.class);
|
||||
QemuImg qemuImgMock = Mockito.mock(QemuImg.class);
|
||||
|
||||
Mockito.when(baseFileMock.getPath()).thenReturn("/path/to/baseFile");
|
||||
Mockito.when(primaryPoolMock.createFolder(Mockito.anyString())).thenReturn(true);
|
||||
|
|
@ -280,22 +280,21 @@ public class KVMStorageProcessorTest {
|
|||
Mockito.lenient().doThrow(new QemuImgException(errorMessage)).when(mock).convert(Mockito.any(QemuImgFile.class), Mockito.any(QemuImgFile.class), Mockito.any(Map.class),
|
||||
Mockito.any(List.class), Mockito.any(QemuImageOptions.class),Mockito.nullable(String.class), Mockito.any(Boolean.class));
|
||||
}))) {
|
||||
String test = storageProcessor.convertBaseFileToSnapshotFileInPrimaryStorageDir(primaryPoolMock, baseFileMock, "/path/to/snapshot", volumeMock, 0);
|
||||
String test = storageProcessor.convertBaseFileToSnapshotFileInStorageDir(primaryPoolMock, baseFileMock, "/path/to/snapshot", TemplateConstants.DEFAULT_SNAPSHOT_ROOT_DIR, volumeMock, 0);
|
||||
Assert.assertNotNull(test);
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void convertBaseFileToSnapshotFileInPrimaryStorageDirTestFailToConvertWithLibvirtExceptionReturnErrorMessage() throws Exception {
|
||||
public void convertBaseFileToSnapshotFileInPrimaryStorageDirTestFailToConvertWithLibvirtExceptionReturnErrorMessage() {
|
||||
KVMPhysicalDisk baseFile = Mockito.mock(KVMPhysicalDisk.class);
|
||||
String snapshotPath = "snapshotPath";
|
||||
QemuImg qemuImg = Mockito.mock(QemuImg.class);
|
||||
|
||||
Mockito.doReturn(true).when(kvmStoragePoolMock).createFolder(Mockito.anyString());
|
||||
try (MockedConstruction<QemuImg> ignored = Mockito.mockConstructionWithAnswer(QemuImg.class, invocation -> {
|
||||
throw Mockito.mock(LibvirtException.class);
|
||||
})) {
|
||||
String result = storageProcessorSpy.convertBaseFileToSnapshotFileInPrimaryStorageDir(kvmStoragePoolMock, baseFile, snapshotPath, volumeObjectToMock, 1);
|
||||
String result = storageProcessorSpy.convertBaseFileToSnapshotFileInStorageDir(kvmStoragePoolMock, baseFile, snapshotPath, TemplateConstants.DEFAULT_SNAPSHOT_ROOT_DIR, volumeObjectToMock, 1);
|
||||
Assert.assertNotNull(result);
|
||||
}
|
||||
}
|
||||
|
|
@ -309,7 +308,7 @@ public class KVMStorageProcessorTest {
|
|||
try (MockedConstruction<QemuImg> ignored = Mockito.mockConstruction(QemuImg.class, (mock, context) -> {
|
||||
Mockito.doNothing().when(mock).convert(Mockito.any(QemuImgFile.class), Mockito.any(QemuImgFile.class));
|
||||
})) {
|
||||
String result = storageProcessorSpy.convertBaseFileToSnapshotFileInPrimaryStorageDir(kvmStoragePoolMock, baseFile, snapshotPath, volumeObjectToMock, 1);
|
||||
String result = storageProcessorSpy.convertBaseFileToSnapshotFileInStorageDir(kvmStoragePoolMock, baseFile, snapshotPath, TemplateConstants.DEFAULT_SNAPSHOT_ROOT_DIR, volumeObjectToMock, 1);
|
||||
Assert.assertNull(result);
|
||||
}
|
||||
}
|
||||
|
|
@ -487,4 +486,66 @@ public class KVMStorageProcessorTest {
|
|||
attachOrDetachDeviceTest( false, "vmName", diskDefMock);
|
||||
Mockito.verify(domainMock, Mockito.times(1)).detachDevice(Mockito.anyString());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void generateBackupXmlTestNoParents() {
|
||||
String result = storageProcessorSpy.generateBackupXml(null, null, "vda", "path");
|
||||
|
||||
Assert.assertFalse(result.contains("<incremental>"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void generateBackupXmlTestWithParents() {
|
||||
String result = storageProcessorSpy.generateBackupXml(null, new String[]{"checkpointname"}, "vda", "path");
|
||||
|
||||
Assert.assertTrue(result.contains("<incremental>checkpointname</incremental>"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void createFolderOnCorrectStorageTestSecondaryIsNull() {
|
||||
storageProcessorSpy.createFolderOnCorrectStorage(kvmStoragePoolMock, null, new Pair<>("t", "u"));
|
||||
|
||||
Mockito.verify(kvmStoragePoolMock).createFolder("u");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void createFolderOnCorrectStorageTestSecondaryIsNotNull() {
|
||||
KVMStoragePool secondaryStoragePoolMock = Mockito.mock(KVMStoragePool.class);
|
||||
|
||||
storageProcessorSpy.createFolderOnCorrectStorage(kvmStoragePoolMock, secondaryStoragePoolMock, new Pair<>("t", "u"));
|
||||
|
||||
Mockito.verify(secondaryStoragePoolMock).createFolder("u");
|
||||
}
|
||||
|
||||
@Test (expected = CloudRuntimeException.class)
|
||||
public void getDiskLabelToSnapshotTestNoDisks() throws LibvirtException {
|
||||
storageProcessorSpy.getDiskLabelToSnapshot(new ArrayList<>(), null, Mockito.mock(Domain.class));
|
||||
}
|
||||
|
||||
@Test (expected = CloudRuntimeException.class)
|
||||
public void getDiskLabelToSnapshotTestDiskHasNoPath() throws LibvirtException {
|
||||
LibvirtVMDef.DiskDef diskDefMock1 = Mockito.mock(LibvirtVMDef.DiskDef.class);
|
||||
Mockito.doReturn(null).when(diskDefMock1).getDiskPath();
|
||||
|
||||
storageProcessorSpy.getDiskLabelToSnapshot(List.of(diskDefMock1), "Path", Mockito.mock(Domain.class));
|
||||
}
|
||||
|
||||
@Test (expected = CloudRuntimeException.class)
|
||||
public void getDiskLabelToSnapshotTestDiskPathDoesNotMatch() throws LibvirtException {
|
||||
LibvirtVMDef.DiskDef diskDefMock1 = Mockito.mock(LibvirtVMDef.DiskDef.class);
|
||||
Mockito.doReturn("test").when(diskDefMock1).getDiskPath();
|
||||
|
||||
storageProcessorSpy.getDiskLabelToSnapshot(List.of(diskDefMock1), "Path", Mockito.mock(Domain.class));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getDiskLabelToSnapshotTestDiskMatches() throws LibvirtException {
|
||||
LibvirtVMDef.DiskDef diskDefMock1 = Mockito.mock(LibvirtVMDef.DiskDef.class);
|
||||
Mockito.doReturn("Path").when(diskDefMock1).getDiskPath();
|
||||
Mockito.doReturn("vda").when(diskDefMock1).getDiskLabel();
|
||||
|
||||
String result = storageProcessorSpy.getDiskLabelToSnapshot(List.of(diskDefMock1), "Path", Mockito.mock(Domain.class));
|
||||
|
||||
Assert.assertEquals("vda", result);
|
||||
}
|
||||
}
|
||||
|
|
|
|||