mirror of https://github.com/apache/cloudstack.git
Merge branch 'master' into ui-restyle
commit 205f22b037
@@ -19,14 +19,11 @@ package com.cloud.agent;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;

@@ -39,9 +36,7 @@ import javax.naming.ConfigurationException;
import org.apache.commons.daemon.Daemon;
import org.apache.commons.daemon.DaemonContext;
import org.apache.commons.daemon.DaemonInitException;
import org.apache.commons.httpclient.HttpClient;
import org.apache.commons.httpclient.MultiThreadedHttpConnectionManager;
import org.apache.commons.httpclient.methods.GetMethod;
import org.apache.log4j.Logger;
import org.apache.log4j.xml.DOMConfigurator;

@@ -56,7 +51,6 @@ import com.cloud.utils.PropertiesUtil;
import com.cloud.utils.backoff.BackoffAlgorithm;
import com.cloud.utils.backoff.impl.ConstantTimeBackoff;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script;

public class AgentShell implements IAgentShell, Daemon {
private static final Logger s_logger = Logger.getLogger(AgentShell.class

@@ -20,7 +20,7 @@ package com.cloud.agent.api.to;

import com.cloud.storage.DataStoreRole;

public interface DataStoreTO {
public DataStoreRole getRole();
public String getUuid();
}

@@ -22,6 +22,7 @@ public class NfsTO implements DataStoreTO {

private String _url;
private DataStoreRole _role;
private String uuid;

public NfsTO() {

@@ -55,6 +56,12 @@ public class NfsTO implements DataStoreTO {
this._role = _role;
}

@Override
public String getUuid() {
return uuid;
}

public void setUuid(String uuid) {
this.uuid = uuid;
}
}

@@ -39,6 +39,7 @@ public final class S3TO implements S3Utils.ClientOptions, DataStoreTO {
private Integer socketTimeout;
private Date created;
private boolean enableRRS;
private boolean multipartEnabled;

public S3TO() {

@@ -50,7 +51,7 @@ public final class S3TO implements S3Utils.ClientOptions, DataStoreTO {
final String secretKey, final String endPoint,
final String bucketName, final Boolean httpsFlag,
final Integer connectionTimeout, final Integer maxErrorRetry,
final Integer socketTimeout, final Date created, final boolean enableRRS) {
final Integer socketTimeout, final Date created, final boolean enableRRS, final boolean multipart) {

super();

@@ -66,6 +67,7 @@ public final class S3TO implements S3Utils.ClientOptions, DataStoreTO {
this.socketTimeout = socketTimeout;
this.created = created;
this.enableRRS = enableRRS;
this.multipartEnabled = multipart;

}

@@ -268,7 +270,6 @@ public final class S3TO implements S3Utils.ClientOptions, DataStoreTO {
}

public boolean getEnableRRS() {
return enableRRS;
}

@@ -277,5 +278,14 @@ public final class S3TO implements S3Utils.ClientOptions, DataStoreTO {
this.enableRRS = enableRRS;
}

public boolean isMultipartEnabled() {
return multipartEnabled;
}

public void setMultipartEnabled(boolean multipartEnabled) {
this.multipartEnabled = multipartEnabled;
}

}

@@ -29,8 +29,7 @@ public class SwiftTO implements DataStoreTO, SwiftUtil.SwiftClientCfg {

public SwiftTO() { }

public SwiftTO(Long id, String url, String account, String userName, String key
) {
public SwiftTO(Long id, String url, String account, String userName, String key) {
this.id = id;
this.url = url;
this.account = account;

@@ -46,14 +45,17 @@ public class SwiftTO implements DataStoreTO, SwiftUtil.SwiftClientCfg {
return url;
}

@Override
public String getAccount() {
return account;
}

@Override
public String getUserName() {
return userName;
}

@Override
public String getKey() {
return key;
}

@@ -67,4 +69,9 @@ public class SwiftTO implements DataStoreTO, SwiftUtil.SwiftClientCfg {
public String getEndPoint() {
return this.url;
}

@Override
public String getUuid() {
return null;
}
}

@@ -199,6 +199,7 @@ public class EventTypes {
// Snapshots
public static final String EVENT_SNAPSHOT_CREATE = "SNAPSHOT.CREATE";
public static final String EVENT_SNAPSHOT_DELETE = "SNAPSHOT.DELETE";
public static final String EVENT_SNAPSHOT_REVERT = "SNAPSHOT.REVERT";
public static final String EVENT_SNAPSHOT_POLICY_CREATE = "SNAPSHOTPOLICY.CREATE";
public static final String EVENT_SNAPSHOT_POLICY_UPDATE = "SNAPSHOTPOLICY.UPDATE";
public static final String EVENT_SNAPSHOT_POLICY_DELETE = "SNAPSHOTPOLICY.DELETE";

@@ -387,7 +388,7 @@ public class EventTypes {
public static final String EVENT_RESOURCE_DETAILS_CREATE = "CREATE_RESOURCE_DETAILS";
public static final String EVENT_RESOURCE_DETAILS_DELETE = "DELETE_RESOURCE_DETAILS";

// vm snapshot events
// vm snapshot events
public static final String EVENT_VM_SNAPSHOT_CREATE = "VMSNAPSHOT.CREATE";
public static final String EVENT_VM_SNAPSHOT_DELETE = "VMSNAPSHOT.DELETE";
public static final String EVENT_VM_SNAPSHOT_REVERT = "VMSNAPSHOT.REVERTTO";

@@ -444,7 +445,7 @@ public class EventTypes {
public static final String EVENT_DEDICATE_RESOURCE_RELEASE = "DEDICATE.RESOURCE.RELEASE";

public static final String EVENT_CLEANUP_VM_RESERVATION = "VM.RESERVATION.CLEANUP";

public static final String EVENT_UCS_ASSOCIATED_PROFILE = "UCS.ASSOCIATEPROFILE";

static {

@@ -84,7 +84,7 @@ public interface VolumeApiService {

Snapshot allocSnapshot(Long volumeId, Long policyId)
throws ResourceAllocationException;
Volume updateVolume(long volumeId, String path, String state, Long storageId);
Volume updateVolume(long volumeId, String path, String state, Long storageId, Boolean displayVolume);

/**
* Extracts the volume to a particular location.

@@ -106,4 +106,6 @@ public interface SnapshotApiService {
* @return
*/
Long getHostIdForSnapshotOperation(Volume vol);

boolean revertSnapshot(Long snapshotId);
}

@@ -49,6 +49,9 @@ public class UpdateDiskOfferingCmd extends BaseCmd{
@Parameter(name=ApiConstants.SORT_KEY, type=CommandType.INTEGER, description="sort key of the disk offering, integer")
private Integer sortKey;

@Parameter(name=ApiConstants.DISPLAY_OFFERING, type=CommandType.BOOLEAN, description="an optional field, whether to display the offering to the end user or not.")
private Boolean displayOffering;

/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////

@@ -69,8 +72,11 @@ public class UpdateDiskOfferingCmd extends BaseCmd{
return sortKey;
}

public Boolean getDisplayOffering() {
return displayOffering;
}

/////////////////////////////////////////////////////
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////

@@ -59,7 +59,7 @@ public class ListStoragePoolsCmd extends BaseListCmd {
@Parameter(name=ApiConstants.ZONE_ID, type=CommandType.UUID, entityType = ZoneResponse.class,
description="the Zone ID for the storage pool")
private Long zoneId;

@Parameter(name=ApiConstants.ID, type=CommandType.UUID, entityType = StoragePoolResponse.class,
description="the ID of the storage pool")
private Long id;

@@ -109,6 +109,7 @@ public class ListStoragePoolsCmd extends BaseListCmd {
return s_name;
}

@Override
public ApiCommandJobType getInstanceType() {
return ApiCommandJobType.StoragePool;
}

@@ -0,0 +1,93 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.api.command.user.snapshot;

import com.cloud.event.EventTypes;
import com.cloud.storage.Snapshot;
import com.cloud.user.Account;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiCommandJobType;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.ApiErrorCode;
import org.apache.cloudstack.api.BaseAsyncCmd;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.response.SnapshotResponse;
import org.apache.cloudstack.api.response.SuccessResponse;
import org.apache.cloudstack.context.CallContext;

@APICommand(name = "RevertSnapshot", description = "revert a volume snapshot.", responseObject = SnapshotResponse.class)
public class RevertSnapshotCmd extends BaseAsyncCmd {
private static final String s_name = "revertsnapshotresponse";
@Parameter(name= ApiConstants.ID, type= BaseCmd.CommandType.UUID, entityType = SnapshotResponse.class,
required=true, description="The ID of the snapshot")
private Long id;

public Long getId() {
return id;
}

@Override
public String getCommandName() {
return s_name;
}

@Override
public long getEntityOwnerId() {
Snapshot snapshot = _entityMgr.findById(Snapshot.class, getId());
if (snapshot != null) {
return snapshot.getAccountId();
}

return Account.ACCOUNT_ID_SYSTEM; // no account info given, parent this command to SYSTEM so ERROR events are tracked
}

@Override
public String getEventType() {
return EventTypes.EVENT_SNAPSHOT_REVERT;
}

@Override
public String getEventDescription() {
return "revert snapshot: " + getId();
}

public ApiCommandJobType getInstanceType() {
return ApiCommandJobType.Snapshot;
}

public Long getInstanceId() {
return getId();
}

@Override
public void execute(){
CallContext.current().setEventDetails("Snapshot Id: "+getId());
boolean result = _snapshotService.revertSnapshot(getId());
if (result) {
SuccessResponse response = new SuccessResponse(getCommandName());
response.setResponseName(getCommandName());
this.setResponseObject(response);
} else {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to revert snapshot");
}
}
}

@@ -35,7 +35,7 @@ import com.cloud.storage.Volume;
@APICommand(name = "updateVolume", description="Updates the volume.", responseObject=VolumeResponse.class)
public class UpdateVolumeCmd extends BaseAsyncCmd {
public static final Logger s_logger = Logger.getLogger(UpdateVolumeCmd.class.getName());
private static final String s_name = "addVolumeresponse";
private static final String s_name = "updatevolumeresponse";

/////////////////////////////////////////////////////
//////////////// API parameters /////////////////////

@@ -54,6 +54,9 @@ public class UpdateVolumeCmd extends BaseAsyncCmd {
@Parameter(name=ApiConstants.STATE, type=CommandType.STRING, description="The state of the volume", since="4.3")
private String state;

@Parameter(name=ApiConstants.DISPLAY_VOLUME, type=CommandType.BOOLEAN, description="an optional field, whether to the display the volume to the end user or not.")
private Boolean displayVolume;

/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////

@@ -73,9 +76,12 @@ public class UpdateVolumeCmd extends BaseAsyncCmd {
public String getState() {
return state;
}

/////////////////////////////////////////////////////
public Boolean getDisplayVolume() {
return displayVolume;
}

/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////

@@ -126,7 +132,7 @@ public class UpdateVolumeCmd extends BaseAsyncCmd {
@Override
public void execute(){
CallContext.current().setEventDetails("Volume Id: "+getId());
Volume result = _volumeService.updateVolume(getId(), getPath(), getState(), getStorageId());
Volume result = _volumeService.updateVolume(getId(), getPath(), getState(), getStorageId(), getDisplayVolume());
if (result != null) {
VolumeResponse response = _responseGenerator.createVolumeResponse(result);
response.setResponseName(getCommandName());

@@ -178,12 +178,26 @@ public class VolumeResponse extends BaseResponse implements ControlledViewEntity
@Param(description="the status of the volume")
private String status;

@SerializedName(ApiConstants.TAGS) @Param(description="the list of resource tags associated with volume", responseObject = ResourceTagResponse.class)
@SerializedName(ApiConstants.TAGS)
@Param(description="the list of resource tags associated with volume", responseObject = ResourceTagResponse.class)
private Set<ResourceTagResponse> tags;

@SerializedName(ApiConstants.DISPLAY_VOLUME) @Param(description="an optional field whether to the display the volume to the end user or not.")
@SerializedName(ApiConstants.DISPLAY_VOLUME)
@Param(description="an optional field whether to the display the volume to the end user or not.")
private Boolean displayVm;

@SerializedName(ApiConstants.PATH)
@Param(description="The path of the volume")
private String path;

public String getPath() {
return path;
}

public void setPath(String path) {
this.path = path;
}

public VolumeResponse(){
tags = new LinkedHashSet<ResourceTagResponse>();
}

@@ -25,6 +25,7 @@ public class AttachVolumeCommand extends Command {
private StoragePoolType pooltype;
private String volumePath;
private String volumeName;
private Long volumeSize;
private Long deviceId;
private String chainInfo;
private String poolUuid;

@@ -45,13 +46,14 @@ public class AttachVolumeCommand extends Command {

public AttachVolumeCommand(boolean attach, boolean managed, String vmName,
StoragePoolType pooltype, String volumePath, String volumeName,
Long deviceId, String chainInfo) {
Long volumeSize, Long deviceId, String chainInfo) {
this.attach = attach;
this._managed = managed;
this.vmName = vmName;
this.pooltype = pooltype;
this.volumePath = volumePath;
this.volumeName = volumeName;
this.volumeSize = volumeSize;
this.deviceId = deviceId;
this.chainInfo = chainInfo;
}

@@ -85,6 +87,10 @@ public class AttachVolumeCommand extends Command {
return volumeName;
}

public Long getVolumeSize() {
return volumeSize;
}

public Long getDeviceId() {
return deviceId;
}

@@ -23,8 +23,12 @@ import org.apache.cloudstack.storage.command.CopyCommand;
import org.apache.cloudstack.storage.command.CreateObjectCommand;
import org.apache.cloudstack.storage.command.DeleteCommand;
import org.apache.cloudstack.storage.command.DettachCommand;
import org.apache.cloudstack.storage.command.ForgetObjectCmd;
import org.apache.cloudstack.storage.command.IntroduceObjectCmd;

import com.cloud.agent.api.Answer;
import org.apache.cloudstack.storage.command.ForgetObjectCmd;
import org.apache.cloudstack.storage.command.IntroduceObjectCmd;

public interface StorageProcessor {
public Answer copyTemplateToPrimaryStorage(CopyCommand cmd);

@@ -43,4 +47,6 @@ public interface StorageProcessor {
public Answer deleteVolume(DeleteCommand cmd);
public Answer createVolumeFromSnapshot(CopyCommand cmd);
public Answer deleteSnapshot(DeleteCommand cmd);
Answer introduceObject(IntroduceObjectCmd cmd);
Answer forgetObject(ForgetObjectCmd cmd);
}

@@ -24,6 +24,7 @@ import org.apache.cloudstack.storage.command.CreateObjectAnswer;
import org.apache.cloudstack.storage.command.CreateObjectCommand;
import org.apache.cloudstack.storage.command.DeleteCommand;
import org.apache.cloudstack.storage.command.DettachCommand;
import org.apache.cloudstack.storage.command.IntroduceObjectCmd;
import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
import org.apache.log4j.Logger;

@@ -55,6 +56,8 @@ public class StorageSubsystemCommandHandlerBase implements StorageSubsystemComma
return execute((AttachCommand)command);
} else if (command instanceof DettachCommand) {
return execute((DettachCommand)command);
} else if (command instanceof IntroduceObjectCmd) {
return processor.introduceObject((IntroduceObjectCmd)command);
}
return new Answer((Command)command, false, "not implemented yet");
}

@@ -65,7 +68,7 @@ public class StorageSubsystemCommandHandlerBase implements StorageSubsystemComma
DataStoreTO srcDataStore = srcData.getDataStore();
DataStoreTO destDataStore = destData.getDataStore();

if ((srcData.getObjectType() == DataObjectType.TEMPLATE) && (srcDataStore instanceof NfsTO) && (destData.getDataStore().getRole() == DataStoreRole.Primary)) {
if ((srcData.getObjectType() == DataObjectType.TEMPLATE) && (destData.getObjectType() == DataObjectType.TEMPLATE && destData.getDataStore().getRole() == DataStoreRole.Primary)) {
//copy template to primary storage
return processor.copyTemplateToPrimaryStorage(cmd);
} else if (srcData.getObjectType() == DataObjectType.TEMPLATE && srcDataStore.getRole() == DataStoreRole.Primary && destDataStore.getRole() == DataStoreRole.Primary) {

@@ -80,18 +83,19 @@ public class StorageSubsystemCommandHandlerBase implements StorageSubsystemComma
} else if (destData.getObjectType() == DataObjectType.TEMPLATE) {
return processor.createTemplateFromVolume(cmd);
}
} else if (srcData.getObjectType() == DataObjectType.SNAPSHOT && srcData.getDataStore().getRole() == DataStoreRole.Primary) {
} else if (srcData.getObjectType() == DataObjectType.SNAPSHOT && destData.getObjectType() == DataObjectType.SNAPSHOT &&
destData.getDataStore().getRole() == DataStoreRole.Primary) {
return processor.backupSnapshot(cmd);
} else if (srcData.getObjectType() == DataObjectType.SNAPSHOT && destData.getObjectType() == DataObjectType.VOLUME) {
return processor.createVolumeFromSnapshot(cmd);
return processor.createVolumeFromSnapshot(cmd);
} else if (srcData.getObjectType() == DataObjectType.SNAPSHOT && destData.getObjectType() == DataObjectType.TEMPLATE) {
return processor.createTemplateFromSnapshot(cmd);
}

return new Answer(cmd, false, "not implemented yet");
}

protected Answer execute(CreateObjectCommand cmd) {
DataTO data = cmd.getData();
try {

@@ -106,21 +110,21 @@ public class StorageSubsystemCommandHandlerBase implements StorageSubsystemComma
return new CreateObjectAnswer(e.toString());
}
}

protected Answer execute(DeleteCommand cmd) {
DataTO data = cmd.getData();
Answer answer = null;
if (data.getObjectType() == DataObjectType.VOLUME) {
answer = processor.deleteVolume(cmd);
} else if (data.getObjectType() == DataObjectType.SNAPSHOT) {
answer = processor.deleteSnapshot(cmd);
answer = processor.deleteSnapshot(cmd);
} else {
answer = new Answer(cmd, false, "unsupported type");
}

return answer;
}

protected Answer execute(AttachCommand cmd) {
DiskTO disk = cmd.getDisk();
if (disk.getType() == Volume.Type.ISO) {

@@ -129,7 +133,7 @@ public class StorageSubsystemCommandHandlerBase implements StorageSubsystemComma
return processor.attachVolume(cmd);
}
}

protected Answer execute(DettachCommand cmd) {
DiskTO disk = cmd.getDisk();
if (disk.getType() == Volume.Type.ISO) {

@@ -47,8 +47,6 @@ import com.amazonaws.services.s3.model.ProgressEvent;
import com.amazonaws.services.s3.model.ProgressListener;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.StorageClass;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.Upload;

import org.apache.cloudstack.managed.context.ManagedContextRunnable;
import org.apache.cloudstack.storage.command.DownloadCommand.ResourceType;

@@ -227,9 +225,6 @@ public class S3TemplateDownloader extends ManagedContextRunnable implements Temp
// compute s3 key
s3Key = join(asList(installPath, fileName), S3Utils.SEPARATOR);

// multi-part upload using S3 api to handle > 5G input stream
TransferManager tm = new TransferManager(S3Utils.acquireClient(s3));

// download using S3 API
ObjectMetadata metadata = new ObjectMetadata();
metadata.setContentLength(remoteSize);

@@ -262,11 +257,19 @@ public class S3TemplateDownloader extends ManagedContextRunnable implements Temp
}

});
// TransferManager processes all transfers asynchronously,
// so this call will return immediately.
Upload upload = tm.upload(putObjectRequest);

upload.waitForCompletion();

if ( s3.isMultipartEnabled()){
// use TransferManager to do multipart upload
S3Utils.mputObject(s3, putObjectRequest);
} else{
// single part upload, with 5GB limit in Amazon
S3Utils.putObject(s3, putObjectRequest);
while (status != TemplateDownloader.Status.DOWNLOAD_FINISHED &&
status != TemplateDownloader.Status.UNRECOVERABLE_ERROR &&
status != TemplateDownloader.Status.ABORTED) {
// wait for completion
}
}

// finished or aborted
Date finish = new Date();

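The hunk above routes uploads through S3Utils and gates multipart behaviour on the new S3TO.isMultipartEnabled() flag. For reference, the sketch below shows what the two paths typically reduce to in the AWS SDK for Java that this class already imports; it is illustrative only, not the S3Utils implementation from this commit, and the s3Client/request parameters are assumed names.

// Illustrative sketch only -- not code from this commit.
void uploadObject(AmazonS3 s3Client, PutObjectRequest request, boolean multipartEnabled) throws InterruptedException {
    if (multipartEnabled) {
        // TransferManager splits large streams into parts, so objects beyond the
        // 5 GB single-PUT limit can be stored; waitForCompletion() blocks until done.
        TransferManager tm = new TransferManager(s3Client);
        Upload upload = tm.upload(request);
        upload.waitForCompletion();
    } else {
        // Single PutObject call; Amazon S3 caps a single PUT at 5 GB.
        s3Client.putObject(request);
    }
}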
@@ -0,0 +1,37 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.command;

import com.cloud.agent.api.Command;
import com.cloud.agent.api.to.DataTO;

public class ForgetObjectCmd extends Command implements StorageSubSystemCommand {
private DataTO dataTO;
public ForgetObjectCmd(DataTO data) {
this.dataTO = data;
}

public DataTO getDataTO() {
return this.dataTO;
}
@Override
public boolean executeInSequence() {
return false;
}
}

@@ -0,0 +1,33 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.command;

import com.cloud.agent.api.Answer;
import com.cloud.agent.api.to.DataTO;

public class IntroduceObjectAnswer extends Answer {
private DataTO dataTO;
public IntroduceObjectAnswer(DataTO dataTO) {
this.dataTO = dataTO;
}

public DataTO getDataTO() {
return this.dataTO;
}
}

@@ -0,0 +1,38 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.command;

import com.cloud.agent.api.Command;
import com.cloud.agent.api.to.DataTO;

public class IntroduceObjectCmd extends Command implements StorageSubSystemCommand {
private DataTO dataTO;
public IntroduceObjectCmd(DataTO dataTO) {
this.dataTO = dataTO;
}

public DataTO getDataTO() {
return this.dataTO;
}

@Override
public boolean executeInSequence() {
return false;
}
}

@@ -26,6 +26,7 @@ public class ImageStoreTO implements DataStoreTO {
private String uri;
private String providerName;
private DataStoreRole role;
private String uuid;

public ImageStoreTO() {

@@ -76,4 +77,13 @@ public class ImageStoreTO implements DataStoreTO {
return new StringBuilder("ImageStoreTO[type=").append(type).append("|provider=").append(providerName)
.append("|role=").append(role).append("|uri=").append(uri).append("]").toString();
}

@Override
public String getUuid() {
return uuid;
}

public void setUuid(String uuid) {
this.uuid = uuid;
}
}

@@ -46,6 +46,7 @@ public class PrimaryDataStoreTO implements DataStoreTO {
return this.id;
}

@Override
public String getUuid() {
return this.uuid;
}

@@ -27,7 +27,7 @@ import com.cloud.storage.Storage.StoragePoolType;

public class AttachVolumeAnswerTest {
AttachVolumeCommand avc = new AttachVolumeCommand(true, false, "vmname",
StoragePoolType.Filesystem, "vPath", "vName",
StoragePoolType.Filesystem, "vPath", "vName", 1073741824L,
123456789L, "chainInfo");
AttachVolumeAnswer ava1 = new AttachVolumeAnswer(avc);
String results = "";

@@ -26,7 +26,7 @@ import com.cloud.storage.Storage.StoragePoolType;

public class AttachVolumeCommandTest {
AttachVolumeCommand avc = new AttachVolumeCommand(true, false, "vmname",
StoragePoolType.Filesystem, "vPath", "vName",
StoragePoolType.Filesystem, "vPath", "vName", 1073741824L,
123456789L, "chainInfo");

@Test

@@ -1,4 +1,4 @@
cloudstack (4.3.0) unstable; urgency=low
cloudstack (4.3.0-snapshot) unstable; urgency=low

* Update the version to 4.3.0.snapshot

@@ -12,6 +12,7 @@

DEBVERS := $(shell dpkg-parsechangelog | sed -n -e 's/^Version: //p')
VERSION := $(shell echo '$(DEBVERS)' | sed -e 's/^[[:digit:]]*://' -e 's/[~-].*//')
MVNADD := $(shell if echo '$(DEBVERS)' | grep -q snapshot; then echo -SNAPSHOT; fi )
PACKAGE = $(shell dh_listpackages|head -n 1|cut -d '-' -f 1)
SYSCONFDIR = "/etc"
DESTDIR = "debian/tmp"

@@ -65,8 +66,8 @@ install:
mkdir $(DESTDIR)/var/log/$(PACKAGE)/agent
mkdir $(DESTDIR)/usr/share/$(PACKAGE)-agent
mkdir $(DESTDIR)/usr/share/$(PACKAGE)-agent/plugins
install -D agent/target/cloud-agent-$(VERSION)-SNAPSHOT.jar $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib/$(PACKAGE)-agent.jar
install -D plugins/hypervisors/kvm/target/cloud-plugin-hypervisor-kvm-$(VERSION)-SNAPSHOT.jar $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib/
install -D agent/target/cloud-agent-$(VERSION)$(MVNADD).jar $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib/$(PACKAGE)-agent.jar
install -D plugins/hypervisors/kvm/target/cloud-plugin-hypervisor-kvm-$(VERSION)$(MVNADD).jar $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib/
install -D plugins/hypervisors/kvm/target/dependencies/* $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib/
install -D packaging/debian/init/cloud-agent $(DESTDIR)/$(SYSCONFDIR)/init.d/$(PACKAGE)-agent
install -D agent/target/transformed/cloud-setup-agent $(DESTDIR)/usr/bin/cloudstack-setup-agent

@@ -92,7 +93,7 @@ install:
mkdir $(DESTDIR)/var/lib/$(PACKAGE)/management
mkdir $(DESTDIR)/var/lib/$(PACKAGE)/mnt
cp -r client/target/utilities/scripts/db/* $(DESTDIR)/usr/share/$(PACKAGE)-management/setup/
cp -r client/target/cloud-client-ui-$(VERSION)-SNAPSHOT/* $(DESTDIR)/usr/share/$(PACKAGE)-management/webapps/client/
cp -r client/target/cloud-client-ui-$(VERSION)$(MVNADD)/* $(DESTDIR)/usr/share/$(PACKAGE)-management/webapps/client/
cp server/target/conf/* $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/server/
cp client/target/conf/* $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/management/

@@ -145,7 +146,7 @@ install:
mkdir $(DESTDIR)/var/log/$(PACKAGE)/usage
mkdir $(DESTDIR)/usr/share/$(PACKAGE)-usage
mkdir $(DESTDIR)/usr/share/$(PACKAGE)-usage/plugins
install -D usage/target/cloud-usage-$(VERSION)-SNAPSHOT.jar $(DESTDIR)/usr/share/$(PACKAGE)-usage/lib/$(PACKAGE)-usage.jar
install -D usage/target/cloud-usage-$(VERSION)$(MVNADD).jar $(DESTDIR)/usr/share/$(PACKAGE)-usage/lib/$(PACKAGE)-usage.jar
install -D usage/target/dependencies/* $(DESTDIR)/usr/share/$(PACKAGE)-usage/lib/
cp usage/target/transformed/db.properties $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/usage/
cp usage/target/transformed/log4j-cloud_usage.xml $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/usage/log4j-cloud.xml

@@ -158,7 +159,7 @@ install:
mkdir -p $(DESTDIR)/usr/share/$(PACKAGE)-bridge/webapps/awsapi
mkdir $(DESTDIR)/usr/share/$(PACKAGE)-bridge/setup
ln -s /usr/share/$(PACKAGE)-bridge/webapps/awsapi $(DESTDIR)/usr/share/$(PACKAGE)-management/webapps7080/awsapi
cp -r awsapi/target/cloud-awsapi-$(VERSION)-SNAPSHOT/* $(DESTDIR)/usr/share/$(PACKAGE)-bridge/webapps/awsapi
cp -r awsapi/target/cloud-awsapi-$(VERSION)$(MVNADD)/* $(DESTDIR)/usr/share/$(PACKAGE)-bridge/webapps/awsapi
install -D awsapi-setup/setup/cloud-setup-bridge $(DESTDIR)/usr/bin/cloudstack-setup-bridge
install -D awsapi-setup/setup/cloudstack-aws-api-register $(DESTDIR)/usr/bin/cloudstack-aws-api-register
cp -r awsapi-setup/db/mysql/* $(DESTDIR)/usr/share/$(PACKAGE)-bridge/setup

@@ -28,4 +28,6 @@ public interface EndPointSelector {
EndPoint select(DataStore store);

List<EndPoint> selectAll(DataStore store);

EndPoint select(Scope scope, Long storeId);
}

@@ -24,5 +24,5 @@ public interface SnapshotService {

boolean deleteSnapshot(SnapshotInfo snapshot);

boolean revertSnapshot(SnapshotInfo snapshot);
boolean revertSnapshot(Long snapshotId);
}

@@ -11,7 +11,7 @@
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.engine.subsystem.api.storage;

@@ -25,5 +25,7 @@ public interface SnapshotStrategy {

boolean deleteSnapshot(Long snapshotId);

boolean revertSnapshot(Long snapshotId);

boolean canHandle(Snapshot snapshot);
}

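The revert path added across these hunks spans three layers: RevertSnapshotCmd calls SnapshotApiService.revertSnapshot(Long), which is expected to pick a SnapshotStrategy via canHandle() and delegate to its revertSnapshot(Long). The service implementation is not part of this diff; the sketch below only illustrates that dispatch pattern, and the _snapshotDao and snapshotStrategies fields are assumed names rather than code from the commit.

// Illustrative sketch only -- not taken from this commit.
public boolean revertSnapshot(Long snapshotId) {
    SnapshotVO snapshot = _snapshotDao.findById(snapshotId);        // assumed DAO lookup
    if (snapshot == null) {
        throw new InvalidParameterValueException("No snapshot with id " + snapshotId);
    }
    for (SnapshotStrategy strategy : snapshotStrategies) {          // assumed injected strategy list
        if (strategy.canHandle(snapshot)) {
            return strategy.revertSnapshot(snapshotId);             // interface method added above
        }
    }
    throw new CloudRuntimeException("No snapshot strategy can revert snapshot " + snapshotId);
}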
@@ -43,16 +43,13 @@ import javax.naming.ConfigurationException;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLEngine;

import org.apache.log4j.Logger;

import com.google.gson.Gson;

import org.apache.cloudstack.framework.config.ConfigDepot;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.managed.context.ManagedContextRunnable;
import org.apache.cloudstack.managed.context.ManagedContextTimerTask;
import org.apache.cloudstack.utils.identity.ManagementServerNode;
import org.apache.log4j.Logger;

import com.cloud.agent.AgentManager;
import com.cloud.agent.api.Answer;

@@ -86,7 +83,6 @@ import com.cloud.host.Status.Event;
import com.cloud.resource.ServerResource;
import com.cloud.serializer.GsonHelper;
import com.cloud.utils.DateUtil;
import com.cloud.utils.Profiler;
import com.cloud.utils.concurrency.NamedThreadFactory;
import com.cloud.utils.db.QueryBuilder;
import com.cloud.utils.db.SearchCriteria.Op;

@@ -94,36 +90,35 @@ import com.cloud.utils.db.Transaction;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.nio.Link;
import com.cloud.utils.nio.Task;
import com.google.gson.Gson;

@Local(value = { AgentManager.class, ClusteredAgentRebalanceService.class })
public class ClusteredAgentManagerImpl extends AgentManagerImpl implements ClusterManagerListener, ClusteredAgentRebalanceService {
final static Logger s_logger = Logger.getLogger(ClusteredAgentManagerImpl.class);
private static final ScheduledExecutorService s_transferExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("Cluster-AgentTransferExecutor"));
private static final ScheduledExecutorService s_transferExecutor = Executors.newScheduledThreadPool(2, new NamedThreadFactory("Cluster-AgentRebalancingExecutor"));
private final long rebalanceTimeOut = 300000; // 5 mins - after this time remove the agent from the transfer list

public final static long STARTUP_DELAY = 5000;
public final static long SCAN_INTERVAL = 90000; // 90 seconds, it takes 60 sec for xenserver to fail login
public final static int ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION = 5; // 5 seconds
protected Set<Long> _agentToTransferIds = new HashSet<Long>();

Gson _gson;

@Inject
protected ClusterManager _clusterMgr = null;

protected HashMap<String, SocketChannel> _peers;
protected HashMap<String, SSLEngine> _sslEngines;
private final Timer _timer = new Timer("ClusteredAgentManager Timer");

private final Timer _agentLbTimer = new Timer("ClusteredAgentManager AgentRebalancing Timer");
boolean _agentLbHappened = false;

@Inject
protected ClusterManager _clusterMgr = null;
@Inject
protected ManagementServerHostDao _mshostDao;
@Inject
protected HostTransferMapDao _hostTransferDao;

// @com.cloud.utils.component.Inject(adapter = AgentLoadBalancerPlanner.class)
@Inject protected List<AgentLoadBalancerPlanner> _lbPlanners;

@Inject ConfigurationDao _configDao;
@Inject
protected List<AgentLoadBalancerPlanner> _lbPlanners;
@Inject
ConfigurationDao _configDao;
@Inject
ConfigDepot _configDepot;

@@ -168,9 +163,10 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
if (s_logger.isDebugEnabled()) {
s_logger.debug("Scheduled direct agent scan task to run at an interval of " + ScanInterval.value() + " seconds");
}

// schedule transfer scan executor - if agent LB is enabled

// Schedule tasks for agent rebalancing
if (isAgentRebalanceEnabled()) {
s_transferExecutor.scheduleAtFixedRate(getAgentRebalanceScanTask(), 60000, 60000, TimeUnit.MILLISECONDS);
s_transferExecutor.scheduleAtFixedRate(getTransferScanTask(), 60000, ClusteredAgentRebalanceService.DEFAULT_TRANSFER_CHECK_INTERVAL,
TimeUnit.MILLISECONDS);
}

@@ -571,6 +567,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
}
}
_timer.cancel();
_agentLbTimer.cancel();

//cancel all transfer tasks
s_transferExecutor.shutdownNow();

@@ -1354,44 +1351,52 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
}

public boolean rebalanceAgent(long agentId, Event event, long currentOwnerId, long futureOwnerId) throws AgentUnavailableException, OperationTimedoutException {
return _rebalanceService.executeRebalanceRequest(agentId, currentOwnerId, futureOwnerId, event);
return executeRebalanceRequest(agentId, currentOwnerId, futureOwnerId, event);
}

public boolean isAgentRebalanceEnabled() {
return EnableLB.value();
}

private ClusteredAgentRebalanceService _rebalanceService;

boolean _agentLbHappened = false;
public void agentrebalance() {
Profiler profilerAgentLB = new Profiler();
profilerAgentLB.start();
//initiate agent lb task will be scheduled and executed only once, and only when number of agents loaded exceeds _connectedAgentsThreshold
if (EnableLB.value() && !_agentLbHappened) {
QueryBuilder<HostVO> sc = QueryBuilder.create(HostVO.class);
sc.and(sc.entity().getManagementServerId(), Op.NNULL);
sc.and(sc.entity().getType(), Op.EQ, Host.Type.Routing);
List<HostVO> allManagedRoutingAgents = sc.list();

sc = QueryBuilder.create(HostVO.class);
sc.and(sc.entity().getType(), Op.EQ, Host.Type.Routing);
List<HostVO> allAgents = sc.list();
double allHostsCount = allAgents.size();
double managedHostsCount = allManagedRoutingAgents.size();
if (allHostsCount > 0.0) {
double load = managedHostsCount / allHostsCount;
if (load >= ConnectedAgentThreshold.value()) {
s_logger.debug("Scheduling agent rebalancing task as the average agent load " + load + " is more than the threshold " + ConnectedAgentThreshold.value());
_rebalanceService.scheduleRebalanceAgents();
_agentLbHappened = true;
} else {
s_logger.trace("Not scheduling agent rebalancing task as the averages load " + load + " is less than the threshold " + ConnectedAgentThreshold.value());

private Runnable getAgentRebalanceScanTask() {
return new ManagedContextRunnable() {
@Override
protected void runInContext() {
try {
if (s_logger.isTraceEnabled()) {
s_logger.trace("Agent rebalance task check, management server id:" + _nodeId);
}
//initiate agent lb task will be scheduled and executed only once, and only when number of agents loaded exceeds _connectedAgentsThreshold
if (!_agentLbHappened) {
QueryBuilder<HostVO> sc = QueryBuilder.create(HostVO.class);
sc.and(sc.entity().getManagementServerId(), Op.NNULL);
sc.and(sc.entity().getType(), Op.EQ, Host.Type.Routing);
List<HostVO> allManagedRoutingAgents = sc.list();

sc = QueryBuilder.create(HostVO.class);
sc.and(sc.entity().getType(), Op.EQ, Host.Type.Routing);
List<HostVO> allAgents = sc.list();
double allHostsCount = allAgents.size();
double managedHostsCount = allManagedRoutingAgents.size();
if (allHostsCount > 0.0) {
double load = managedHostsCount / allHostsCount;
if (load >= ConnectedAgentThreshold.value()) {
s_logger.debug("Scheduling agent rebalancing task as the average agent load " + load + " is more than the threshold " + ConnectedAgentThreshold.value());
scheduleRebalanceAgents();
_agentLbHappened = true;
} else {
s_logger.debug("Not scheduling agent rebalancing task as the averages load " + load + " is less than the threshold " + ConnectedAgentThreshold.value());
}
}
}
} catch (Throwable e) {
s_logger.error("Problem with the clustered agent transfer scan check!", e);
}
}
profilerAgentLB.stop();
}
};
}

@Override
public void rescan() {

@@ -60,7 +60,7 @@ import com.cloud.utils.exception.CloudRuntimeException;

@Component(value="EngineHostDao")
@Local(value = { EngineHostDao.class })
@DB(txn = false)
@DB
@TableGenerator(name = "host_req_sq", table = "op_host", pkColumnName = "id", valueColumnName = "sequence", allocationSize = 1)
public class EngineHostDaoImpl extends GenericDaoBase<EngineHostVO, Long> implements EngineHostDao {
private static final Logger s_logger = Logger.getLogger(EngineHostDaoImpl.class);

@@ -17,9 +17,6 @@
package com.cloud.certificate.dao;

import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;

import javax.ejb.Local;

@@ -32,7 +29,7 @@ import com.cloud.utils.db.DB;
import com.cloud.utils.db.GenericDaoBase;

@Component
@Local(value={CertificateDao.class}) @DB(txn=false)
@Local(value={CertificateDao.class}) @DB
public class CertificateDaoImpl extends GenericDaoBase<CertificateVO, Long> implements CertificateDao {

private static final Logger s_logger = Logger.getLogger(CertificateDaoImpl.class);

@@ -34,7 +34,7 @@ import com.cloud.utils.db.SearchCriteria;

@Component
@Local(value = { HostTransferMapDao.class })
@DB(txn = false)
@DB
public class HostTransferMapDaoImpl extends GenericDaoBase<HostTransferMapVO, Long> implements HostTransferMapDao {
private static final Logger s_logger = Logger.getLogger(HostTransferMapDaoImpl.class);

@@ -30,7 +30,7 @@ import com.cloud.utils.db.Transaction;

@Component
@Local(value=ClusterVSMMapDao.class)
@DB(txn = false)
@DB
public class ClusterVSMMapDaoImpl extends GenericDaoBase<ClusterVSMMapVO, Long> implements ClusterVSMMapDao {

final SearchBuilder<ClusterVSMMapVO> ClusterSearch;

@@ -38,7 +38,7 @@ import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.net.NetUtils;

@Component
@Local(value={DataCenterIpAddressDao.class}) @DB(txn=false)
@Local(value={DataCenterIpAddressDao.class}) @DB
public class DataCenterIpAddressDaoImpl extends GenericDaoBase<DataCenterIpAddressVO, Long> implements DataCenterIpAddressDao {
private static final Logger s_logger = Logger.getLogger(DataCenterIpAddressDaoImpl.class);

@@ -30,7 +30,6 @@ import org.springframework.stereotype.Component;

import com.cloud.dc.DataCenterLinkLocalIpAddressVO;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.GenericDao;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.GenericSearchBuilder;
import com.cloud.utils.db.SearchBuilder;

@@ -41,7 +40,7 @@ import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.net.NetUtils;

@Component
@Local(value={DataCenterLinkLocalIpAddressDaoImpl.class}) @DB(txn=false)
@Local(value={DataCenterLinkLocalIpAddressDaoImpl.class}) @DB
public class DataCenterLinkLocalIpAddressDaoImpl extends GenericDaoBase<DataCenterLinkLocalIpAddressVO, Long> implements DataCenterLinkLocalIpAddressDao {
private static final Logger s_logger = Logger.getLogger(DataCenterLinkLocalIpAddressDaoImpl.class);

@@ -25,14 +25,12 @@ import java.util.Map;
import javax.inject.Inject;
import javax.naming.ConfigurationException;

import com.cloud.exception.InvalidParameterValueException;
import org.springframework.stereotype.Component;

import com.cloud.dc.DataCenterVnetVO;
import com.cloud.network.dao.AccountGuestVlanMapDao;
import com.cloud.network.dao.AccountGuestVlanMapVO;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.GenericDao;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.GenericSearchBuilder;
import com.cloud.utils.db.JoinBuilder;

@@ -48,7 +46,7 @@ import com.cloud.utils.exception.CloudRuntimeException;
* data center/physical_network and the vnet that appears within the physical network.
*/
@Component
@DB(txn=false)
@DB
public class DataCenterVnetDaoImpl extends GenericDaoBase<DataCenterVnetVO, Long> implements DataCenterVnetDao {

private final SearchBuilder<DataCenterVnetVO> FreeVnetSearch;

@@ -18,17 +18,13 @@ package com.cloud.dc.dao;

import java.util.Date;
import java.util.List;
import java.util.Map;

import javax.ejb.Local;
import javax.naming.ConfigurationException;

import org.springframework.stereotype.Component;

import com.cloud.dc.DataCenterIpAddressVO;
import com.cloud.dc.StorageNetworkIpAddressVO;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.Filter;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.GenericSearchBuilder;
import com.cloud.utils.db.SearchBuilder;

@@ -37,10 +33,11 @@ import com.cloud.utils.db.GenericQueryBuilder;
import com.cloud.utils.db.Transaction;
import com.cloud.utils.db.SearchCriteria.Func;
import com.cloud.utils.db.SearchCriteria.Op;
import com.cloud.utils.db.Transaction;

@Component
@Local(value={StorageNetworkIpAddressDao.class})
@DB(txn=false)
@DB
public class StorageNetworkIpAddressDaoImpl extends GenericDaoBase<StorageNetworkIpAddressVO, Long> implements StorageNetworkIpAddressDao {
protected final GenericSearchBuilder<StorageNetworkIpAddressVO, Long> countInUserIp;
protected final GenericSearchBuilder<StorageNetworkIpAddressVO, String> listInUseIp;

@@ -33,7 +33,7 @@ import com.cloud.utils.db.SearchCriteria.Op;

@Component
@Local(value={StorageNetworkIpRangeDao.class})
@DB(txn=false)
@DB
public class StorageNetworkIpRangeDaoImpl extends GenericDaoBase<StorageNetworkIpRangeVO, Long> implements StorageNetworkIpRangeDao {
protected final GenericSearchBuilder<StorageNetworkIpRangeVO, Long> countRanges;

@@ -65,7 +65,7 @@ import com.cloud.utils.exception.CloudRuntimeException;

@Component
@Local(value = {HostDao.class})
@DB(txn = false)
@DB
@TableGenerator(name = "host_req_sq", table = "op_host", pkColumnName = "id", valueColumnName = "sequence", allocationSize = 1)
public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao { //FIXME: , ExternalIdDao {
private static final Logger s_logger = Logger.getLogger(HostDaoImpl.class);

@@ -30,7 +30,7 @@ import com.cloud.utils.db.SearchCriteria;

@Component
@Local(value={AccountGuestVlanMapDao.class})
@DB(txn=false)
@DB
public class AccountGuestVlanMapDaoImpl extends GenericDaoBase<AccountGuestVlanMapVO, Long> implements AccountGuestVlanMapDao {

protected SearchBuilder<AccountGuestVlanMapVO> AccountSearch;

@@ -30,7 +30,7 @@ import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Op;

@Component
@Local(value=ExternalFirewallDeviceDao.class) @DB(txn=false)
@Local(value=ExternalFirewallDeviceDao.class) @DB
public class ExternalFirewallDeviceDaoImpl extends GenericDaoBase<ExternalFirewallDeviceVO, Long> implements ExternalFirewallDeviceDao {
final SearchBuilder<ExternalFirewallDeviceVO> physicalNetworkServiceProviderSearch;
final SearchBuilder<ExternalFirewallDeviceVO> physicalNetworkIdSearch;

@@ -29,7 +29,7 @@ import javax.ejb.Local;
import java.util.List;

@Component
@Local(value=ExternalLoadBalancerDeviceDao.class) @DB(txn=false)
@Local(value=ExternalLoadBalancerDeviceDao.class) @DB
public class ExternalLoadBalancerDeviceDaoImpl extends GenericDaoBase<ExternalLoadBalancerDeviceVO, Long> implements ExternalLoadBalancerDeviceDao {
final SearchBuilder<ExternalLoadBalancerDeviceVO> physicalNetworkIdSearch;
final SearchBuilder<ExternalLoadBalancerDeviceVO> physicalNetworkServiceProviderSearch;

@@ -31,7 +31,6 @@ import com.cloud.network.rules.FirewallRule.TrafficType;
import com.cloud.network.rules.FirewallRuleVO;
import com.cloud.server.ResourceTag.TaggedResourceType;
import com.cloud.tags.dao.ResourceTagDao;
import com.cloud.tags.dao.ResourceTagsDaoImpl;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.GenericSearchBuilder;

@@ -44,7 +43,7 @@ import com.cloud.utils.db.Transaction;

@Component
@Local(value = FirewallRulesDao.class)
@DB(txn = false)
@DB
public class FirewallRulesDaoImpl extends GenericDaoBase<FirewallRuleVO, Long> implements FirewallRulesDao {

protected final SearchBuilder<FirewallRuleVO> AllFieldsSearch;

@@ -40,10 +40,8 @@ import com.cloud.network.Networks.TrafficType;
import com.cloud.offering.NetworkOffering;
import com.cloud.offerings.NetworkOfferingVO;
import com.cloud.offerings.dao.NetworkOfferingDao;
import com.cloud.offerings.dao.NetworkOfferingDaoImpl;
import com.cloud.server.ResourceTag.TaggedResourceType;
import com.cloud.tags.dao.ResourceTagDao;
import com.cloud.tags.dao.ResourceTagsDaoImpl;
import com.cloud.utils.db.*;
import com.cloud.utils.db.JoinBuilder.JoinType;
import com.cloud.utils.db.SearchCriteria.Func;

@@ -52,7 +50,7 @@ import com.cloud.utils.net.NetUtils;

@Component
@Local(value = NetworkDao.class)
@DB(txn = false)
@DB()
public class NetworkDaoImpl extends GenericDaoBase<NetworkVO, Long> implements NetworkDao {
SearchBuilder<NetworkVO> AllFieldsSearch;
SearchBuilder<NetworkVO> AccountSearch;

@@ -30,7 +30,7 @@ import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Op;

@Component
@Local(value=NetworkDomainDao.class) @DB(txn=false)
@Local(value=NetworkDomainDao.class) @DB()
public class NetworkDomainDaoImpl extends GenericDaoBase<NetworkDomainVO, Long> implements NetworkDomainDao {
final SearchBuilder<NetworkDomainVO> AllFieldsSearch;
final SearchBuilder<NetworkDomainVO> DomainsSearch;

@@ -30,7 +30,7 @@ import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Op;

@Component
@Local(value=NetworkExternalFirewallDao.class) @DB(txn=false)
@Local(value=NetworkExternalFirewallDao.class) @DB()
public class NetworkExternalFirewallDaoImpl extends GenericDaoBase<NetworkExternalFirewallVO, Long> implements NetworkExternalFirewallDao {

final SearchBuilder<NetworkExternalFirewallVO> networkIdSearch;

@@ -29,7 +29,7 @@ import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Op;

@Component
@Local(value=NetworkExternalLoadBalancerDao.class) @DB(txn=false)
@Local(value=NetworkExternalLoadBalancerDao.class) @DB()
public class NetworkExternalLoadBalancerDaoImpl extends GenericDaoBase<NetworkExternalLoadBalancerVO, Long> implements NetworkExternalLoadBalancerDao {

final SearchBuilder<NetworkExternalLoadBalancerVO> networkIdSearch;

@ -33,7 +33,7 @@ import com.cloud.utils.db.SearchBuilder;
|
|||
import com.cloud.utils.db.SearchCriteria;
|
||||
|
||||
@Component
|
||||
@Local(value=NetworkServiceMapDao.class) @DB(txn=false)
|
||||
@Local(value=NetworkServiceMapDao.class) @DB()
|
||||
public class NetworkServiceMapDaoImpl extends GenericDaoBase<NetworkServiceMapVO, Long> implements NetworkServiceMapDao {
|
||||
final SearchBuilder<NetworkServiceMapVO> AllFieldsSearch;
|
||||
final SearchBuilder<NetworkServiceMapVO> MultipleServicesSearch;
|
||||
|
|
|
|||
|
|
@@ -32,7 +32,7 @@ import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Op;
@Component
@Local(value=PhysicalNetworkDao.class) @DB(txn=false)
@Local(value=PhysicalNetworkDao.class) @DB()
public class PhysicalNetworkDaoImpl extends GenericDaoBase<PhysicalNetworkVO, Long> implements PhysicalNetworkDao {
final SearchBuilder<PhysicalNetworkVO> ZoneSearch;

@@ -31,7 +31,7 @@ import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Op;
@Component
@Local(value=PhysicalNetworkServiceProviderDao.class) @DB(txn=false)
@Local(value=PhysicalNetworkServiceProviderDao.class) @DB()
public class PhysicalNetworkServiceProviderDaoImpl extends GenericDaoBase<PhysicalNetworkServiceProviderVO, Long> implements PhysicalNetworkServiceProviderDao {
final SearchBuilder<PhysicalNetworkServiceProviderVO> physicalNetworkSearch;
final SearchBuilder<PhysicalNetworkServiceProviderVO> physicalNetworkServiceProviderSearch;

@@ -33,7 +33,7 @@ import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Op;
@Component
@Local(value=PhysicalNetworkTrafficTypeDao.class) @DB(txn=false)
@Local(value=PhysicalNetworkTrafficTypeDao.class) @DB()
public class PhysicalNetworkTrafficTypeDaoImpl extends GenericDaoBase<PhysicalNetworkTrafficTypeVO, Long> implements PhysicalNetworkTrafficTypeDao {
final SearchBuilder<PhysicalNetworkTrafficTypeVO> physicalNetworkSearch;
final GenericSearchBuilder<PhysicalNetworkTrafficTypeVO, String> kvmAllFieldsSearch;

@@ -34,7 +34,7 @@ import com.cloud.utils.db.SearchCriteria.Op;
import com.cloud.utils.exception.CloudRuntimeException;
@Component
@Local(value=PortProfileDao.class) @DB(txn=false)
@Local(value=PortProfileDao.class) @DB()
public class PortProfileDaoImpl extends GenericDaoBase<PortProfileVO, Long> implements PortProfileDao {
protected static final Logger s_logger = Logger.getLogger(PortProfileDaoImpl.class);

@@ -30,7 +30,7 @@ import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
@Component
@Local(value=VirtualRouterProviderDao.class) @DB(txn=false)
@Local(value=VirtualRouterProviderDao.class) @DB()
public class VirtualRouterProviderDaoImpl extends GenericDaoBase<VirtualRouterProviderVO, Long> implements VirtualRouterProviderDao {
final SearchBuilder<VirtualRouterProviderVO> AllFieldsSearch;

@@ -19,14 +19,13 @@ package com.cloud.network.vpc.dao;
import com.cloud.network.vpc.NetworkACLVO;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
import org.springframework.stereotype.Component;
import javax.ejb.Local;
@Component
@Local(value = NetworkACLDao.class)
@DB(txn = false)
@DB()
public class NetworkACLDaoImpl extends GenericDaoBase<NetworkACLVO, Long> implements NetworkACLDao{
protected NetworkACLDaoImpl() {

@@ -28,7 +28,7 @@ import java.util.List;
@Component
@Local(value = NetworkACLItemDao.class)
@DB(txn = false)
@DB()
public class NetworkACLItemDaoImpl extends GenericDaoBase<NetworkACLItemVO, Long> implements NetworkACLItemDao {
protected final SearchBuilder<NetworkACLItemVO> AllFieldsSearch;

@@ -36,7 +36,7 @@ import com.cloud.utils.db.Transaction;
@Component
@Local(value = PrivateIpDao.class)
@DB(txn = false)
@DB()
public class PrivateIpDaoImpl extends GenericDaoBase<PrivateIpVO, Long> implements PrivateIpDao {
private static final Logger s_logger = Logger.getLogger(PrivateIpDaoImpl.class);

@@ -40,7 +40,7 @@ import com.cloud.utils.db.Transaction;
@Component
@Local(value = StaticRouteDao.class)
@DB(txn = false)
@DB()
public class StaticRouteDaoImpl extends GenericDaoBase<StaticRouteVO, Long> implements StaticRouteDao{
protected final SearchBuilder<StaticRouteVO> AllFieldsSearch;
protected final SearchBuilder<StaticRouteVO> NotRevokedSearch;

@@ -42,7 +42,7 @@ import com.cloud.utils.db.Transaction;
@Component
@Local(value = VpcDao.class)
@DB(txn = false)
@DB()
public class VpcDaoImpl extends GenericDaoBase<VpcVO, Long> implements VpcDao{
final GenericSearchBuilder<VpcVO, Integer> CountByOfferingId;
final SearchBuilder<VpcVO> AllFieldsSearch;
@@ -31,7 +31,7 @@ import java.util.List;
@Component
@Local(value = VpcGatewayDao.class)
@DB(txn = false)
@DB()
public class VpcGatewayDaoImpl extends GenericDaoBase<VpcGatewayVO, Long> implements VpcGatewayDao{
protected final SearchBuilder<VpcGatewayVO> AllFieldsSearch;

@@ -30,7 +30,7 @@ import com.cloud.utils.db.Transaction;
@Component
@Local(value = VpcOfferingDao.class)
@DB(txn = false)
@DB()
public class VpcOfferingDaoImpl extends GenericDaoBase<VpcOfferingVO, Long> implements VpcOfferingDao{
final SearchBuilder<VpcOfferingVO> AllFieldsSearch;

@@ -33,7 +33,7 @@ import com.cloud.utils.db.SearchCriteria.Func;
@Component
@Local(value = VpcOfferingServiceMapDao.class)
@DB(txn = false)
@DB()
public class VpcOfferingServiceMapDaoImpl extends GenericDaoBase<VpcOfferingServiceMapVO, Long> implements VpcOfferingServiceMapDao{
final SearchBuilder<VpcOfferingServiceMapVO> AllFieldsSearch;
final SearchBuilder<VpcOfferingServiceMapVO> MultipleServicesSearch;

@@ -33,7 +33,7 @@ import com.cloud.utils.db.SearchCriteria;
import org.springframework.stereotype.Component;
@Component
@Local(value=VpcServiceMapDao.class) @DB(txn=false)
@Local(value=VpcServiceMapDao.class) @DB()
public class VpcServiceMapDaoImpl extends GenericDaoBase<VpcServiceMapVO, Long> implements VpcServiceMapDao {
final SearchBuilder<VpcServiceMapVO> AllFieldsSearch;
final SearchBuilder<VpcServiceMapVO> MultipleServicesSearch;

@@ -42,7 +42,7 @@ import com.cloud.utils.db.Transaction;
@Component
@Local(value = NetworkOfferingDao.class)
@DB(txn = false)
@DB()
public class NetworkOfferingDaoImpl extends GenericDaoBase<NetworkOfferingVO, Long> implements NetworkOfferingDao {
final SearchBuilder<NetworkOfferingVO> NameSearch;
final SearchBuilder<NetworkOfferingVO> SystemOfferingSearch;

@@ -34,7 +34,7 @@ import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Func;
@Component
@Local(value=NetworkOfferingServiceMapDao.class) @DB(txn=false)
@Local(value=NetworkOfferingServiceMapDao.class) @DB()
public class NetworkOfferingServiceMapDaoImpl extends GenericDaoBase<NetworkOfferingServiceMapVO, Long> implements NetworkOfferingServiceMapDao {
final SearchBuilder<NetworkOfferingServiceMapVO> AllFieldsSearch;

@@ -34,7 +34,7 @@ import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
@Component
@Local(value={ServiceOfferingDao.class}) @DB(txn=false)
@Local(value={ServiceOfferingDao.class}) @DB()
public class ServiceOfferingDaoImpl extends GenericDaoBase<ServiceOfferingVO, Long> implements ServiceOfferingDao {
protected static final Logger s_logger = Logger.getLogger(ServiceOfferingDaoImpl.class);

@@ -36,7 +36,7 @@ import com.cloud.utils.exception.CloudRuntimeException;
@Component
@Local(value = { StoragePoolWorkDao.class })
@DB(txn = false)
@DB()
public class StoragePoolWorkDaoImpl extends GenericDaoBase<StoragePoolWorkVO, Long> implements StoragePoolWorkDao {
protected final SearchBuilder<StoragePoolWorkVO> PendingWorkForPrepareForMaintenanceSearch;

@@ -354,7 +354,7 @@ public class VolumeDaoImpl extends GenericDaoBase<VolumeVO, Long> implements Vol
}
@Override
@DB(txn = false)
@DB()
public Pair<Long, Long> getCountAndTotalByPool(long poolId) {
SearchCriteria<SumCount> sc = TotalSizeByPoolSearch.create();
sc.setParameters("poolId", poolId);

@@ -506,7 +506,7 @@ public class VolumeDaoImpl extends GenericDaoBase<VolumeVO, Long> implements Vol
}
@Override
@DB(txn = false)
@DB()
public Pair<Long, Long> getNonDestroyedCountAndTotalByPool(long poolId) {
SearchCriteria<SumCount> sc = TotalSizeByPoolSearch.create();
sc.setParameters("poolId", poolId);
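Both VolumeDaoImpl methods above follow the GenericDaoBase aggregate pattern that also shows up as CountByOfferingId in VpcDaoImpl: build a GenericSearchBuilder typed to the aggregate result once, then bind parameters per call and run customSearch. A minimal sketch of that pattern (the DAO class and the VO accessor used here are illustrative assumptions, not code from this commit):

import java.util.List;
import com.cloud.storage.VolumeVO;
import com.cloud.utils.db.*;

// Sketch only: count volumes per pool with a typed aggregate search.
public class ExampleVolumeStatsDaoImpl extends GenericDaoBase<VolumeVO, Long> {
    private final GenericSearchBuilder<VolumeVO, Integer> CountByPool;

    protected ExampleVolumeStatsDaoImpl() {
        CountByPool = createSearchBuilder(Integer.class);
        CountByPool.select(null, SearchCriteria.Func.COUNT, (Object[]) null);
        CountByPool.and("poolId", CountByPool.entity().getPoolId(), SearchCriteria.Op.EQ);
        CountByPool.done();
    }

    @DB()
    public int countByPool(long poolId) {
        SearchCriteria<Integer> sc = CountByPool.create();
        sc.setParameters("poolId", poolId);
        List<Integer> results = customSearch(sc, null);
        return results.get(0);
    }
}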
@@ -40,7 +40,7 @@ import com.cloud.utils.exception.CloudRuntimeException;
@Component
@Local(value = VersionDao.class)
@DB(txn = false)
@DB()
public class VersionDaoImpl extends GenericDaoBase<VersionVO, Long> implements VersionDao {
private static final Logger s_logger = Logger.getLogger(VersionDaoImpl.class);

@@ -35,7 +35,7 @@ import com.cloud.utils.db.DB;
@Component
@Local(value= { UserVmCloneSettingDao.class })
@DB(txn = false)
@DB()
public class UserVmCloneSettingDaoImpl extends GenericDaoBase<UserVmCloneSettingVO, Long> implements UserVmCloneSettingDao {
public static final Logger s_logger = Logger.getLogger(UserVmCloneSettingDaoImpl.class);

@@ -28,7 +28,7 @@ import java.util.List;
@Component
@Local(value={GlobalLoadBalancerLbRuleMapDao.class})
@DB(txn = false)
@DB()
public class GlobalLoadBalancerLbRuleMapDaoImpl extends GenericDaoBase<GlobalLoadBalancerLbRuleMapVO, Long> implements GlobalLoadBalancerLbRuleMapDao {
private final SearchBuilder<GlobalLoadBalancerLbRuleMapVO> listByGslbRuleId;

@@ -44,7 +44,7 @@ import com.cloud.utils.db.Transaction;
import com.cloud.utils.exception.CloudRuntimeException;
@Local(value = { PrimaryDataStoreDao.class })
@DB(txn = false)
@DB()
public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long> implements PrimaryDataStoreDao {
protected final SearchBuilder<StoragePoolVO> AllFieldSearch;
protected final SearchBuilder<StoragePoolVO> DcPodSearch;
@@ -47,7 +47,7 @@ import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao;
import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity;
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;

@@ -62,9 +62,9 @@ import com.cloud.agent.api.to.VirtualMachineTO;
import com.cloud.configuration.Config;
import com.cloud.host.Host;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.server.ManagementService;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.VolumeVO;
@@ -81,7 +81,7 @@ import com.cloud.utils.exception.CloudRuntimeException;
@Component
public class
AncientDataMotionStrategy implements DataMotionStrategy {
private static final Logger s_logger = Logger.getLogger(AncientDataMotionStrategy.class);
@Inject
EndPointSelector selector;
@@ -138,7 +138,8 @@ public class
DataTO destTO = destData.getTO();
DataStoreTO srcStoreTO = srcTO.getDataStore();
DataStoreTO destStoreTO = destTO.getDataStore();
if (srcStoreTO instanceof NfsTO || srcStoreTO.getRole() == DataStoreRole.ImageCache) {
if (srcStoreTO instanceof NfsTO || srcStoreTO.getRole() == DataStoreRole.ImageCache ||
(srcStoreTO instanceof PrimaryDataStoreTO && ((PrimaryDataStoreTO)srcStoreTO).getPoolType() == StoragePoolType.NetworkFilesystem)) {
return false;
}
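The widened check above means a copy can skip the staging cache whenever the source is already reachable over NFS. A small sketch of that rule as a standalone predicate (the helper name is illustrative; the types are the ones used in the hunk):

// Illustrative predicate mirroring the condition above: true when the source
// store is an NFS store, the image cache, or an NFS-backed primary pool.
static boolean nfsReachable(DataStoreTO store) {
    if (store instanceof NfsTO || store.getRole() == DataStoreRole.ImageCache) {
        return true;
    }
    return store instanceof PrimaryDataStoreTO
            && ((PrimaryDataStoreTO) store).getPoolType() == StoragePoolType.NetworkFilesystem;
}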
@@ -264,8 +265,14 @@ public class
int _createVolumeFromSnapshotWait = NumbersUtil.parseInt(value,
Integer.parseInt(Config.CreateVolumeFromSnapshotWait.getDefaultValue()));
EndPoint ep = null;
if (srcData.getDataStore().getRole() == DataStoreRole.Primary) {
ep = selector.select(volObj);
} else {
ep = selector.select(snapObj, volObj);
}
CopyCommand cmd = new CopyCommand(srcData.getTO(), volObj.getTO(), _createVolumeFromSnapshotWait, _mgmtServer.getExecuteInSequence());
EndPoint ep = selector.select(snapObj, volObj);
Answer answer = ep.sendMessage(cmd);
return answer;
@@ -433,11 +440,17 @@ public class
srcData = cacheSnapshotChain(snapshot);
}
EndPoint ep = null;
if (srcData.getDataStore().getRole() == DataStoreRole.Primary) {
ep = selector.select(destData);
} else {
ep = selector.select(srcData, destData);
}
CopyCommand cmd = new CopyCommand(srcData.getTO(), destData.getTO(), _createprivatetemplatefromsnapshotwait, _mgmtServer.getExecuteInSequence());
EndPoint ep = selector.select(srcData, destData);
Answer answer = ep.sendMessage(cmd);
// clean up snapshot copied to staging
if (needCache && srcData != null) {
cacheMgr.deleteCacheObject(srcData);
}
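Both endpoint hunks above apply the same rule: when the source already sits on primary storage, only the destination constrains which host can execute the CopyCommand; otherwise an endpoint that can reach both sides is needed. A sketch of that rule factored out (the wrapper method is hypothetical; the selector calls are the ones used above):

// Hypothetical helper capturing the selection rule introduced above.
static EndPoint selectCopyEndpoint(EndPointSelector selector, DataObject srcData, DataObject destData) {
    if (srcData.getDataStore().getRole() == DataStoreRole.Primary) {
        // source is on primary storage, so only the destination matters
        return selector.select(destData);
    }
    return selector.select(srcData, destData);
}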
@@ -24,7 +24,6 @@ import java.util.concurrent.ExecutionException;
import javax.inject.Inject;
import com.cloud.capacity.dao.CapacityDao;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.ImageStoreProvider;

@@ -39,9 +38,11 @@ import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager;
import org.apache.cloudstack.storage.datastore.db.ImageStoreVO;
import org.apache.cloudstack.storage.image.ImageStoreDriver;
import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity;
import org.apache.cloudstack.storage.to.ImageStoreTO;
import org.apache.log4j.Logger;
import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.capacity.dao.CapacityDao;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.dao.VMTemplateDao;
@@ -181,7 +182,16 @@ public class ImageStoreImpl implements ImageStoreEntity {
@Override
public DataStoreTO getTO() {
return getDriver().getStoreTO(this);
DataStoreTO to = getDriver().getStoreTO(this);
if (to == null) {
ImageStoreTO primaryTO = new ImageStoreTO();
primaryTO.setProviderName(getProviderName());
primaryTO.setRole(getRole());
primaryTO.setType(getProtocol());
primaryTO.setUri(getUri());
return primaryTO;
}
return to;
}
@Override
@@ -18,7 +18,50 @@
*/
package org.apache.cloudstack.storage.test;
import com.cloud.cluster.LockMasterListener;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import javax.inject.Inject;
import junit.framework.Assert;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotResult;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
import org.apache.cloudstack.storage.datastore.db.ImageStoreDao;
import org.apache.cloudstack.storage.datastore.db.ImageStoreVO;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.volume.VolumeObject;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mockito;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.DataCenter;
import com.cloud.dc.DataCenterVO;

@@ -29,6 +72,7 @@ import com.cloud.dc.dao.HostPodDao;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.org.Cluster;
import com.cloud.org.Managed;
import com.cloud.server.LockMasterListener;
import com.cloud.storage.CreateSnapshotPayload;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.ScopeType;

@@ -47,53 +91,7 @@ import com.cloud.user.AccountManager;
import com.cloud.user.User;
import com.cloud.utils.DateUtil;
import com.cloud.utils.component.ComponentContext;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.Merovingian2;
import junit.framework.Assert;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotResult;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
import org.apache.cloudstack.storage.datastore.PrimaryDataStore;
import org.apache.cloudstack.storage.datastore.db.ImageStoreDao;
import org.apache.cloudstack.storage.datastore.db.ImageStoreVO;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.volume.VolumeObject;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mockito;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import javax.inject.Inject;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -317,7 +315,7 @@ public class SnapshotTestWithFakeData {
final VolumeInfo volumeInfo = createVolume(1L, store);
Assert.assertTrue(volumeInfo.getState() == Volume.State.Ready);
vol = volumeInfo;
// final SnapshotPolicyVO policyVO = createSnapshotPolicy(vol.getId());
ExecutorService pool = Executors.newFixedThreadPool(2);

@@ -325,7 +323,7 @@ public class SnapshotTestWithFakeData {
List<Future<Boolean>> future = new ArrayList<Future<Boolean>>();
for(int i = 0; i < 12; i++) {
final int cnt = i;
Future<Boolean> task = pool.submit(new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
boolean r = true;
@@ -17,20 +17,24 @@
package org.apache.cloudstack.storage.snapshot;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.Snapshot;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.snapshot.SnapshotManager;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.fsm.NoTransitionException;
import com.cloud.vm.dao.UserVmDao;
import com.cloud.vm.snapshot.dao.VMSnapshotDao;
import java.util.concurrent.ExecutionException;
import javax.inject.Inject;
import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService;
import org.apache.cloudstack.engine.subsystem.api.storage.*;
import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionService;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotResult;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.framework.async.AsyncCallFuture;
import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher;
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;

@@ -41,13 +45,19 @@ import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import javax.inject.Inject;
import java.util.concurrent.ExecutionException;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.Snapshot;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.snapshot.SnapshotManager;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.fsm.NoTransitionException;
import com.cloud.vm.dao.UserVmDao;
import com.cloud.vm.snapshot.dao.VMSnapshotDao;
@Component
public class SnapshotServiceImpl implements SnapshotService {
@@ -383,7 +393,7 @@ public class SnapshotServiceImpl implements SnapshotService {
}
@Override
public boolean revertSnapshot(SnapshotInfo snapshot) {
public boolean revertSnapshot(Long snapshotId) {
return false;
}
@@ -11,7 +11,7 @@
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.snapshot;

@@ -35,4 +35,9 @@ public abstract class SnapshotStrategyBase implements SnapshotStrategy {
public SnapshotInfo backupSnapshot(SnapshotInfo snapshot) {
return snapshotSvr.backupSnapshot(snapshot);
}
@Override
public boolean revertSnapshot(Long snapshotId) {
return snapshotSvr.revertSnapshot(snapshotId);
}
}
@@ -11,24 +11,27 @@
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.snapshot;
import javax.inject.Inject;
import com.cloud.storage.Volume;
import com.cloud.utils.db.DB;
import org.apache.cloudstack.engine.subsystem.api.storage.*;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotResult;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.storage.command.CreateObjectAnswer;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;

@@ -36,9 +39,11 @@ import com.cloud.exception.InvalidParameterValueException;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.Snapshot;
import com.cloud.storage.SnapshotVO;
import com.cloud.storage.Volume;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.storage.snapshot.SnapshotManager;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.db.DB;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.fsm.NoTransitionException;
@@ -236,6 +241,11 @@ public class XenserverSnapshotStrategy extends SnapshotStrategyBase {
return true;
}
@Override
public boolean revertSnapshot(Long snapshotId) {
throw new CloudRuntimeException("revert Snapshot is not supported");
}
@Override
@DB
public SnapshotInfo takeSnapshot(SnapshotInfo snapshot) {
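revertSnapshot(Long) is implemented here by throwing, so callers cannot assume revert works for every strategy. A hedged sketch of defensive call-site handling (the helper and logger wiring are illustrative, not part of this commit):

// Illustrative call site: treat an unsupported revert as "not reverted" instead
// of letting the CloudRuntimeException escape.
static boolean tryRevert(SnapshotStrategy strategy, Long snapshotId, Logger logger) {
    try {
        return strategy.revertSnapshot(snapshotId);
    } catch (CloudRuntimeException e) {
        logger.warn("revert snapshot is not supported for snapshot " + snapshotId, e);
        return false;
    }
}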
@@ -27,9 +27,6 @@ import java.util.List;
import javax.inject.Inject;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;

@@ -37,6 +34,8 @@ import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
import org.apache.cloudstack.engine.subsystem.api.storage.Scope;
import org.apache.cloudstack.storage.LocalHostEndpoint;
import org.apache.cloudstack.storage.RemoteHostEndPoint;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.host.Host;
import com.cloud.host.HostVO;

@@ -250,6 +249,11 @@ public class DefaultEndPointSelector implements EndPointSelector {
}
}
@Override
public EndPoint select(Scope scope, Long storeId) {
return findEndPointInScope(scope, findOneHostOnPrimaryStorage, storeId);
}
@Override
public List<EndPoint> selectAll(DataStore store) {
List<EndPoint> endPoints = new ArrayList<EndPoint>();
@@ -0,0 +1,31 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements.  See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership.  The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License.  You may obtain a copy of the License at
*
*   http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied.  See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.helper;
import com.cloud.agent.api.to.DataTO;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.Scope;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
public interface HypervisorHelper {
DataTO introduceObject(DataTO object, Scope scope, Long storeId);
boolean forgetObject(DataTO object, Scope scope, Long storeId);
SnapshotObjectTO takeSnapshot(SnapshotObjectTO snapshotObjectTO, Scope scope);
boolean revertSnapshot(SnapshotObjectTO snapshotObjectTO, Scope scope);
}
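The interface above pairs introduceObject/forgetObject as a bracket around host-side work on a DataTO. A sketch of how a caller might use it for a snapshot revert (the wrapper method is an assumption for illustration; SnapshotObjectTO is treated as a DataTO, as elsewhere in this diff):

// Illustrative usage: make the snapshot visible to a host in the scope,
// revert it there, and always forget it again afterwards.
static boolean revertThroughHelper(HypervisorHelper helper, SnapshotObjectTO snapshotTO, Scope scope, Long storeId) {
    helper.introduceObject(snapshotTO, scope, storeId);
    try {
        return helper.revertSnapshot(snapshotTO, scope);
    } finally {
        helper.forgetObject(snapshotTO, scope, storeId);
    }
}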
@@ -0,0 +1,77 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements.  See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership.  The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License.  You may obtain a copy of the License at
*
*   http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied.  See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.helper;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.to.DataTO;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
import org.apache.cloudstack.engine.subsystem.api.storage.Scope;
import org.apache.cloudstack.storage.command.ForgetObjectCmd;
import org.apache.cloudstack.storage.command.IntroduceObjectAnswer;
import org.apache.cloudstack.storage.command.IntroduceObjectCmd;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
import org.apache.log4j.Logger;
import javax.inject.Inject;
public class HypervisorHelperImpl implements HypervisorHelper {
private static final Logger s_logger = Logger.getLogger(HypervisorHelperImpl.class);
@Inject
EndPointSelector selector;
@Override
public DataTO introduceObject(DataTO object, Scope scope, Long storeId) {
EndPoint ep = selector.select(scope, storeId);
IntroduceObjectCmd cmd = new IntroduceObjectCmd(object);
Answer answer = ep.sendMessage(cmd);
if (answer == null || !answer.getResult()) {
String errMsg = answer == null ? null : answer.getDetails();
throw new CloudRuntimeException("Failed to introduce object, due to " + errMsg);
}
IntroduceObjectAnswer introduceObjectAnswer = (IntroduceObjectAnswer)answer;
return introduceObjectAnswer.getDataTO();
}
@Override
public boolean forgetObject(DataTO object, Scope scope, Long storeId) {
EndPoint ep = selector.select(scope, storeId);
ForgetObjectCmd cmd = new ForgetObjectCmd(object);
Answer answer = ep.sendMessage(cmd);
if (answer == null || !answer.getResult()) {
String errMsg = answer == null ? null : answer.getDetails();
if (errMsg != null) {
s_logger.debug("Failed to forget object: " + errMsg);
}
return false;
}
return true;
}
@Override
public SnapshotObjectTO takeSnapshot(SnapshotObjectTO snapshotObjectTO, Scope scope) {
return null; //To change body of implemented methods use File | Settings | File Templates.
}
@Override
public boolean revertSnapshot(SnapshotObjectTO snapshotObjectTO, Scope scope) {
return false; //To change body of implemented methods use File | Settings | File Templates.
}
}
@@ -36,24 +36,8 @@ import java.lang.annotation.Target;
* _dao.acquireInLockTable(id);
* ...
* _dao.releaseFromLockTable(id);
*
* 3. Annotate methods that are inside a DAO but doesn't use
*    the Transaction class.  Generally, these are methods
*    that are utility methods for setting up searches.  In
*    this case use @DB(txn=false) to annotate the method.
*    While this is not required, it helps when you're debugging
*    the code and it saves on method calls during runtime.
*
*/
@Target({TYPE, METHOD})
@Retention(RUNTIME)
public @interface DB {
/**
* (Optional) Specifies that the method
* does not use transaction.  This is useful for
* utility methods within DAO classes which are
* automatically marked with @DB.  By marking txn=false,
* the method is not surrounded with transaction code.
*/
boolean txn() default true;
}
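The removed javadoc and txn() attribute are what all of the @DB(txn = false) to @DB() churn in the DAO hunks is about: the annotation now only marks a type or method as database-facing, with no per-method opt-out of transaction wrapping. A minimal sketch of a DAO written against the new form (the DAO, VO and column names are hypothetical):

// Hypothetical DAO after this change: @DB() carries no txn attribute any more.
import javax.ejb.Local;
import org.springframework.stereotype.Component;
import com.cloud.utils.db.*;

@Component
@Local(value = ExampleDao.class)
@DB()
public class ExampleDaoImpl extends GenericDaoBase<ExampleVO, Long> implements ExampleDao {
    private final SearchBuilder<ExampleVO> AllFieldsSearch;

    protected ExampleDaoImpl() {
        AllFieldsSearch = createSearchBuilder();
        AllFieldsSearch.and("uuid", AllFieldsSearch.entity().getUuid(), SearchCriteria.Op.EQ);
        AllFieldsSearch.done();
    }

    @DB()
    public ExampleVO findByUuidString(String uuid) {
        SearchCriteria<ExampleVO> sc = AllFieldsSearch.create();
        sc.setParameters("uuid", uuid);
        return findOneBy(sc);
    }
}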
@@ -168,7 +168,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
}
@Override
@SuppressWarnings("unchecked") @DB(txn=false)
@SuppressWarnings("unchecked") @DB()
public <J> GenericSearchBuilder<T, J> createSearchBuilder(Class<J> resultType) {
return new GenericSearchBuilder<T, J>(_entityBeanType, resultType);
}

@@ -282,7 +282,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
setRunLevel(ComponentLifecycle.RUN_LEVEL_SYSTEM);
}
@Override @DB(txn=false)
@Override @DB()
@SuppressWarnings("unchecked")
public T createForUpdate(final ID id) {
final T entity = (T)_factory.newInstance(new Callback[] {NoOp.INSTANCE, new UpdateBuilder(this)});

@@ -296,12 +296,12 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
return entity;
}
@Override @DB(txn=false)
@Override @DB()
public T createForUpdate() {
return createForUpdate(null);
}
@Override @DB(txn=false)
@Override @DB()
public <K> K getNextInSequence(final Class<K> clazz, final String name) {
final TableGenerator tg = _tgs.get(name);
assert (tg != null) : "Couldn't find Table generator using " + name;

@@ -309,7 +309,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
return s_seqFetcher.getNextSequence(clazz, tg);
}
@Override @DB(txn=false)
@Override @DB()
public <K> K getRandomlyIncreasingNextInSequence(final Class<K> clazz, final String name) {
final TableGenerator tg = _tgs.get(name);
assert (tg != null) : "Couldn't find Table generator using " + name;

@@ -317,19 +317,19 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
return s_seqFetcher.getRandomNextSequence(clazz, tg);
}
@Override @DB(txn=false)
@Override @DB()
public List<T> lockRows(final SearchCriteria<T> sc, final Filter filter, final boolean exclusive) {
return search(sc, filter, exclusive, false);
}
@Override @DB(txn=false)
@Override @DB()
public T lockOneRandomRow(final SearchCriteria<T> sc, final boolean exclusive) {
final Filter filter = new Filter(1);
final List<T> beans = search(sc, filter, exclusive, true);
return beans.isEmpty() ? null : beans.get(0);
}
@DB(txn=false)
@DB()
protected List<T> search(SearchCriteria<T> sc, final Filter filter, final Boolean lock, final boolean cache) {
if (_removed != null) {
if (sc == null) {

@@ -340,7 +340,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
return searchIncludingRemoved(sc, filter, lock, cache);
}
@DB(txn=false)
@DB()
protected List<T> search(SearchCriteria<T> sc, final Filter filter, final Boolean lock, final boolean cache, final boolean enable_query_cache) {
if (_removed != null) {
if (sc == null) {

@@ -499,7 +499,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
}
}
@Override @DB(txn=false)
@Override @DB()
public <M> List<M> customSearch(SearchCriteria<M> sc, final Filter filter) {
if (_removed != null) {
sc.addAnd(_removed.second().field.getName(), SearchCriteria.Op.NULL);

@@ -508,7 +508,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
return customSearchIncludingRemoved(sc, filter);
}
@DB(txn=false)
@DB()
protected void setField(Object entity, Field field, ResultSet rs, int index) throws SQLException {
try {
final Class<?> type = field.getType();

@@ -652,7 +652,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
}
}
@DB(txn=false) @SuppressWarnings("unchecked")
@DB() @SuppressWarnings("unchecked")
protected <M> M getObject(Class<M> type, ResultSet rs, int index) throws SQLException {
if (type == String.class) {
byte[] bytes = rs.getBytes(index);

@@ -744,7 +744,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
}
}
@DB(txn=false)
@DB()
protected int addJoinAttributes(int count, PreparedStatement pstmt, Collection<JoinBuilder<SearchCriteria<?>>> joins) throws SQLException {
for (JoinBuilder<SearchCriteria<?>> join : joins) {
for (final Pair<Attribute, Object> value : join.getT().getValues()) {

@@ -832,12 +832,12 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
}
}
@DB(txn=false)
@DB()
protected Attribute findAttributeByFieldName(String name) {
return _allAttributes.get(name);
}
@DB(txn=false)
@DB()
protected String buildSelectByIdSql(final StringBuilder sql) {
if (_idField == null) {
return null;

@@ -857,13 +857,13 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
return sql.toString();
}
@DB(txn=false)
@DB()
@Override
public Class<T> getEntityBeanType() {
return _entityBeanType;
}
@DB(txn=false)
@DB()
protected T findOneIncludingRemovedBy(final SearchCriteria<T> sc) {
Filter filter = new Filter(1);
List<T> results = searchIncludingRemoved(sc, filter, null, false);

@@ -872,7 +872,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
}
@Override
@DB(txn=false)
@DB()
public T findOneBy(final SearchCriteria<T> sc) {
if (_removed != null) {
sc.addAnd(_removed.second().field.getName(), SearchCriteria.Op.NULL);

@@ -880,7 +880,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
return findOneIncludingRemovedBy(sc);
}
@DB(txn=false)
@DB()
protected List<T> listBy(final SearchCriteria<T> sc, final Filter filter) {
if (_removed != null) {
sc.addAnd(_removed.second().field.getName(), SearchCriteria.Op.NULL);

@@ -888,7 +888,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
return listIncludingRemovedBy(sc, filter);
}
@DB(txn=false)
@DB()
protected List<T> listBy(final SearchCriteria<T> sc, final Filter filter, final boolean enable_query_cache) {
if (_removed != null) {
sc.addAnd(_removed.second().field.getName(), SearchCriteria.Op.NULL);

@@ -896,27 +896,27 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
return listIncludingRemovedBy(sc, filter, enable_query_cache);
}
@DB(txn=false)
@DB()
protected List<T> listBy(final SearchCriteria<T> sc) {
return listBy(sc, null);
}
@DB(txn=false)
@DB()
protected List<T> listIncludingRemovedBy(final SearchCriteria<T> sc, final Filter filter, final boolean enable_query_cache) {
return searchIncludingRemoved(sc, filter, null, false, enable_query_cache);
}
@DB(txn=false)
@DB()
protected List<T> listIncludingRemovedBy(final SearchCriteria<T> sc, final Filter filter) {
return searchIncludingRemoved(sc, filter, null, false);
}
@DB(txn=false)
@DB()
protected List<T> listIncludingRemovedBy(final SearchCriteria<T> sc) {
return listIncludingRemovedBy(sc, null);
}
@Override @DB(txn=false)
@Override @DB()
@SuppressWarnings("unchecked")
public T findById(final ID id) {
if (_cache != null) {
|
@ -927,26 +927,26 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
|
|||
}
|
||||
}
|
||||
|
||||
@Override @DB(txn=false)
|
||||
@Override @DB()
|
||||
public T findByUuid(final String uuid) {
|
||||
SearchCriteria<T> sc = createSearchCriteria();
|
||||
sc.addAnd("uuid", SearchCriteria.Op.EQ, uuid);
|
||||
return findOneBy(sc);
|
||||
}
|
||||
|
||||
@Override @DB(txn=false)
|
||||
@Override @DB()
|
||||
public T findByUuidIncludingRemoved(final String uuid) {
|
||||
SearchCriteria<T> sc = createSearchCriteria();
|
||||
sc.addAnd("uuid", SearchCriteria.Op.EQ, uuid);
|
||||
return findOneIncludingRemovedBy(sc);
|
||||
}
|
||||
|
||||
@Override @DB(txn=false)
|
||||
@Override @DB()
|
||||
public T findByIdIncludingRemoved(ID id) {
|
||||
return findById(id, true, null);
|
||||
}
|
||||
|
||||
@Override @DB(txn=false)
|
||||
@Override @DB()
|
||||
public T findById(final ID id, boolean fresh) {
|
||||
if(!fresh) {
|
||||
return findById(id);
|
||||
|
|
@ -958,7 +958,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
|
|||
return lockRow(id, null);
|
||||
}
|
||||
|
||||
@Override @DB(txn=false)
|
||||
@Override @DB()
|
||||
public T lockRow(ID id, Boolean lock) {
|
||||
return findById(id, false, lock);
|
||||
}
|
||||
|
|
@ -987,7 +987,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
|
|||
}
|
||||
}
|
||||
|
||||
@Override @DB(txn=false)
|
||||
@Override @DB()
|
||||
public T acquireInLockTable(ID id) {
|
||||
return acquireInLockTable(id, _timeoutSeconds);
|
||||
}
|
||||
|
|
@ -1018,7 +1018,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
|
|||
return txn.release(_table + id);
|
||||
}
|
||||
|
||||
@Override @DB(txn=false)
|
||||
@Override @DB()
|
||||
public boolean lockInLockTable(final String id) {
|
||||
return lockInLockTable(id, _timeoutSeconds);
|
||||
}
|
||||
|
|
@ -1035,12 +1035,12 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
|
|||
return txn.release(_table + id);
|
||||
}
|
||||
|
||||
@Override @DB(txn=false)
|
||||
@Override @DB()
|
||||
public List<T> listAllIncludingRemoved() {
|
||||
return listAllIncludingRemoved(null);
|
||||
}
|
||||
|
||||
@DB(txn=false)
|
||||
@DB()
|
||||
protected List<Object> addGroupBy(final StringBuilder sql, SearchCriteria<?> sc) {
|
||||
Pair<GroupBy<?, ?, ?>, List<Object>> groupBys = sc.getGroupBy();
|
||||
if (groupBys != null) {
|
||||
|
|
@ -1051,7 +1051,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
|
|||
}
|
||||
}
|
||||
|
||||
@DB(txn=false)
|
||||
@DB()
|
||||
protected void addFilter(final StringBuilder sql, final Filter filter) {
|
||||
if (filter != null) {
|
||||
if (filter.getOrderBy() != null) {
|
||||
|
|
@ -1067,7 +1067,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
|
|||
}
|
||||
}
|
||||
|
||||
@Override @DB(txn=false)
|
||||
@Override @DB()
|
||||
public List<T> listAllIncludingRemoved(final Filter filter) {
|
||||
final StringBuilder sql = createPartialSelectSql(null, false);
|
||||
addFilter(sql, filter);
|
||||
|
|
@ -1098,12 +1098,12 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
|
|||
}
|
||||
}
|
||||
|
||||
@Override @DB(txn=false)
|
||||
@Override @DB()
|
||||
public List<T> listAll() {
|
||||
return listAll(null);
|
||||
}
|
||||
|
||||
@Override @DB(txn=false)
|
||||
@Override @DB()
|
||||
public List<T> listAll(final Filter filter) {
|
||||
if (_removed == null) {
|
||||
return listAllIncludingRemoved(filter);
|
||||
|
|
@ -1174,7 +1174,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
|
|||
}
|
||||
}
|
||||
|
||||
@DB(txn=false)
|
||||
@DB()
|
||||
protected StringBuilder createPartialSelectSql(SearchCriteria<?> sc, final boolean whereClause, final boolean enable_query_cache) {
|
||||
StringBuilder sql = new StringBuilder(enable_query_cache ? _partialQueryCacheSelectSql.first() : _partialSelectSql.first());
|
||||
if (sc != null && !sc.isSelectAll()) {
|
||||
|
|
@ -1189,7 +1189,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
|
|||
return sql;
|
||||
}
|
||||
|
||||
@DB(txn=false)
|
||||
@DB()
|
||||
protected StringBuilder createPartialSelectSql(SearchCriteria<?> sc, final boolean whereClause) {
|
||||
StringBuilder sql = new StringBuilder(_partialSelectSql.first());
|
||||
if (sc != null && !sc.isSelectAll()) {
|
||||
|
|
@ -1205,7 +1205,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
|
|||
}
|
||||
|
||||
|
||||
@DB(txn = false)
|
||||
@DB()
|
||||
protected void addJoins(StringBuilder str, Collection<JoinBuilder<SearchCriteria<?>>> joins) {
|
||||
int fromIndex = str.lastIndexOf("WHERE");
|
||||
if (fromIndex == -1) {
|
||||
|
|
@ -1238,24 +1238,24 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
|
|||
}
|
||||
}
|
||||
|
||||
@Override @DB(txn=false)
|
||||
@Override @DB()
|
||||
public List<T> search(final SearchCriteria<T> sc, final Filter filter) {
|
||||
return search(sc, filter, null, false);
|
||||
}
|
||||
|
||||
@Override @DB(txn=false)
|
||||
@Override @DB()
|
||||
public Pair<List<T>, Integer> searchAndCount(final SearchCriteria<T> sc, final Filter filter) {
|
||||
List<T> objects = search(sc, filter, null, false);
|
||||
Integer count = getCount(sc);
|
||||
return new Pair<List<T>, Integer>(objects, count);
|
||||
}
|
||||
|
||||
@Override @DB(txn=false)
|
||||
@Override @DB()
|
||||
public List<T> search(final SearchCriteria<T> sc, final Filter filter, final boolean enable_query_cache) {
|
||||
return search(sc, filter, null, false, enable_query_cache);
|
||||
}
|
||||
|
||||
@Override @DB(txn=false)
|
||||
@Override @DB()
|
||||
public boolean update(ID id, T entity) {
|
||||
assert Enhancer.isEnhanced(entity.getClass()) : "Entity is not generated by this dao";
|
||||
|
||||
|
|
@ -1264,14 +1264,14 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
|
|||
return result;
|
||||
}
|
||||
|
||||
@DB(txn=false)
|
||||
@DB()
|
||||
public int update(final T entity, final SearchCriteria<T> sc, Integer rows) {
|
||||
final UpdateBuilder ub = getUpdateBuilder(entity);
|
||||
return update(ub, sc, rows);
|
||||
}
|
||||
|
||||
@Override
|
||||
@DB(txn=false)
|
||||
@DB()
|
||||
public int update(final T entity, final SearchCriteria<T> sc) {
final UpdateBuilder ub = getUpdateBuilder(entity);
return update(ub, sc, null);

@@ -1390,7 +1390,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
txn.commit();
}

@DB(txn=false)
@DB()
protected Object generateValue(final Attribute attr) {
if (attr.is(Attribute.Flag.Created) || attr.is(Attribute.Flag.Removed)) {
return new Date();

@@ -1414,7 +1414,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
}
}

@DB(txn=false)
@DB()
protected void prepareAttribute(final int j, final PreparedStatement pstmt, final Attribute attr, Object value) throws SQLException {
if (attr.is(Attribute.Flag.DaoGenerated) && value == null) {
value = generateValue(attr);

@@ -1519,7 +1519,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
}
}

@DB(txn=false)
@DB()
protected int prepareAttributes(final PreparedStatement pstmt, final Object entity, final Attribute[] attrs, final int index) throws SQLException {
int j = 0;
for (int i = 0; i < attrs.length; i++) {

@@ -1536,7 +1536,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
return j;
}

@SuppressWarnings("unchecked") @DB(txn=false)
@SuppressWarnings("unchecked") @DB()
protected T toEntityBean(final ResultSet result, final boolean cache) throws SQLException {
final T entity = (T)_factory.newInstance(new Callback[] {NoOp.INSTANCE, new UpdateBuilder(this)});

@@ -1553,7 +1553,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
return entity;
}

@DB(txn=false)
@DB()
protected T toVO(ResultSet result, boolean cache) throws SQLException {
T entity;
try {

@@ -1575,7 +1575,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
return entity;
}

@DB(txn=false)
@DB()
protected void toEntityBean(final ResultSet result, final T entity) throws SQLException {
ResultSetMetaData meta = result.getMetaData();
for (int index = 1, max = meta.getColumnCount(); index <= max; index++) {

@@ -1586,7 +1586,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
}
}

@DB(txn = true)
@DB()
@SuppressWarnings("unchecked")
protected void loadCollection(T entity, Attribute attr) {
EcInfo ec = (EcInfo)attr.attache;

@@ -1688,7 +1688,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
}
}

@DB(txn=false)
@DB()
protected void setField(final Object entity, final ResultSet rs, ResultSetMetaData meta, final int index) throws SQLException {
Attribute attr = _allColumns.get(new Pair<String, String>(meta.getTableName(index), meta.getColumnName(index)));
if ( attr == null ){

@@ -1745,7 +1745,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
}

protected Cache _cache;
@DB(txn=false)
@DB()
protected void createCache(final Map<String, ? extends Object> params) {
final String value = (String)params.get("cache.size");

@@ -1762,7 +1762,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
}
}

@Override @DB(txn=false)
@Override @DB()
public boolean configure(final String name, final Map<String, Object> params) throws ConfigurationException {
_name = name;

@@ -1778,19 +1778,19 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
return true;
}

@DB(txn=false)
@DB()
public static <T> UpdateBuilder getUpdateBuilder(final T entityObject) {
final Factory factory = (Factory)entityObject;
assert(factory != null);
return (UpdateBuilder)factory.getCallback(1);
}

@Override @DB(txn=false)
@Override @DB()
public SearchBuilder<T> createSearchBuilder() {
return new SearchBuilder<T>(_entityBeanType);
}

@Override @DB(txn=false)
@Override @DB()
public SearchCriteria<T> createSearchCriteria() {
SearchBuilder<T> builder = createSearchBuilder();
return builder.create();

@@ -1859,7 +1859,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
}
}

@DB(txn=false)
@DB()
protected StringBuilder createCountSelect(SearchCriteria<?> sc, final boolean whereClause) {
StringBuilder sql = new StringBuilder(_count);
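The hunks above touch GenericDaoBase's search and update entry points (createSearchBuilder(), createSearchCriteria(), getUpdateBuilder() and update(entity, sc)). A minimal sketch of how a concrete DAO typically drives these APIs, purely for illustration: the ExampleVO entity, its "name" column and the markRenamed() helper are hypothetical and not part of this change.

    // Hedged sketch, assuming an ExampleVO entity with an updatable "name" column.
    public class ExampleDaoImpl extends GenericDaoBase<ExampleVO, Long> {

        public int markRenamed(long id, String newName) {
            // SearchBuilder is the reusable, typed query template.
            SearchBuilder<ExampleVO> sb = createSearchBuilder();
            sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ);
            sb.done();

            SearchCriteria<ExampleVO> sc = sb.create();
            sc.setParameters("id", id);

            // createForUpdate() returns a proxy whose setters are recorded by the
            // UpdateBuilder obtained via getUpdateBuilder(); update(entity, sc)
            // then applies the dirty fields to every row matching sc.
            ExampleVO vo = createForUpdate();
            vo.setName(newName);
            return update(vo, sc);
        }
    }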
@@ -19,7 +19,7 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>cloud-maven-standard</artifactId>
<name>Apache CloudStack Maven Contentions Parent</name>
<name>Apache CloudStack Maven Conventions Parent</name>
<description>Historically ACS was built with a custom build system mixing ant and wscript. When the conversion to maven was done the existing directory structure in git was kept. So the src, testing, and resources folders in ACS don't follow the standard maven conventions. This parent pom forces the folders back to the standard conventions</description>
<packaging>pom</packaging>
<parent>
@@ -18,16 +18,11 @@
// Automatically generated by addcopyright.py at 01/29/2013
package com.cloud.baremetal.database;

import java.util.List;
import java.util.Map;

import javax.ejb.Local;
import javax.naming.ConfigurationException;

import org.springframework.stereotype.Component;

import com.cloud.utils.db.DB;
import com.cloud.utils.db.Filter;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.GenericSearchBuilder;
import com.cloud.utils.db.SearchBuilder;

@@ -36,7 +31,7 @@ import com.cloud.utils.db.GenericQueryBuilder;

@Component
@Local(value=BaremetalDhcpDao.class)
@DB(txn=false)
@DB()
public class BaremetalDhcpDaoImpl extends GenericDaoBase<BaremetalDhcpVO, Long> implements BaremetalDhcpDao {

public BaremetalDhcpDaoImpl() {
@@ -18,16 +18,11 @@
// Automatically generated by addcopyright.py at 01/29/2013
package com.cloud.baremetal.database;

import java.util.List;
import java.util.Map;

import javax.ejb.Local;
import javax.naming.ConfigurationException;

import org.springframework.stereotype.Component;

import com.cloud.utils.db.DB;
import com.cloud.utils.db.Filter;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.GenericSearchBuilder;
import com.cloud.utils.db.SearchBuilder;

@@ -36,6 +31,6 @@ import com.cloud.utils.db.GenericQueryBuilder;

@Component
@Local(value = {BaremetalPxeDao.class})
@DB(txn = false)
@DB()
public class BaremetalPxeDaoImpl extends GenericDaoBase<BaremetalPxeVO, Long> implements BaremetalPxeDao {
}
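The same annotation simplification recurs across the DAO classes in this change: the txn element is dropped and the class-level marker becomes a plain @DB(). A minimal sketch of a DAO declared with the updated form, assuming the remaining annotation elements keep their defaults; ExampleDao/ExampleVO are hypothetical names used only for illustration.

    import javax.ejb.Local;

    import org.springframework.stereotype.Component;

    import com.cloud.utils.db.DB;
    import com.cloud.utils.db.GenericDaoBase;

    @Component
    @Local(value = ExampleDao.class)
    @DB()  // txn element removed; transaction behaviour is no longer declared per DAO class
    public class ExampleDaoImpl extends GenericDaoBase<ExampleVO, Long> implements ExampleDao {
    }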
@@ -18,10 +18,10 @@
*/
package com.cloud.hypervisor.kvm.storage;

import java.io.File;
import java.io.FileOutputStream;
import java.io.FileNotFoundException;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.URISyntaxException;
import java.text.DateFormat;

@@ -35,11 +35,6 @@ import java.util.UUID;

import javax.naming.ConfigurationException;

import com.cloud.agent.api.storage.CopyVolumeAnswer;
import com.cloud.agent.api.to.DataObjectType;
import com.cloud.agent.api.to.S3TO;
import com.cloud.agent.api.to.StorageFilerTO;
import com.cloud.utils.S3Utils;
import org.apache.cloudstack.storage.command.AttachAnswer;
import org.apache.cloudstack.storage.command.AttachCommand;
import org.apache.cloudstack.storage.command.CopyCmdAnswer;

@@ -49,6 +44,8 @@ import org.apache.cloudstack.storage.command.CreateObjectCommand;
import org.apache.cloudstack.storage.command.DeleteCommand;
import org.apache.cloudstack.storage.command.DettachAnswer;
import org.apache.cloudstack.storage.command.DettachCommand;
import org.apache.cloudstack.storage.command.ForgetObjectCmd;
import org.apache.cloudstack.storage.command.IntroduceObjectCmd;
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
import org.apache.cloudstack.storage.to.TemplateObjectTO;

@@ -57,20 +54,28 @@ import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
import org.apache.cloudstack.utils.qemu.QemuImgException;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.apache.log4j.Logger;
import org.apache.commons.io.FileUtils;
import org.apache.log4j.Logger;
import org.libvirt.Connect;
import org.libvirt.Domain;
import org.libvirt.DomainInfo;
import org.libvirt.DomainSnapshot;
import org.libvirt.LibvirtException;

import com.ceph.rados.IoCTX;
import com.ceph.rados.Rados;
import com.ceph.rados.RadosException;
import com.ceph.rbd.Rbd;
import com.ceph.rbd.RbdException;
import com.ceph.rbd.RbdImage;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.storage.PrimaryStorageDownloadAnswer;
import com.cloud.agent.api.to.DataObjectType;
import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.agent.api.to.DataTO;
import com.cloud.agent.api.to.DiskTO;
import com.cloud.agent.api.to.NfsTO;
import com.cloud.agent.api.to.S3TO;
import com.cloud.exception.InternalErrorException;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.resource.LibvirtConnection;

@@ -87,16 +92,10 @@ import com.cloud.storage.template.Processor.FormatInfo;
import com.cloud.storage.template.QCOW2Processor;
import com.cloud.storage.template.TemplateLocation;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.S3Utils;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script;

import com.ceph.rados.Rados;
import com.ceph.rados.RadosException;
import com.ceph.rados.IoCTX;
import com.ceph.rbd.Rbd;
import com.ceph.rbd.RbdImage;
import com.ceph.rbd.RbdException;

import static com.cloud.utils.S3Utils.putFile;

public class KVMStorageProcessor implements StorageProcessor {

@@ -197,7 +196,7 @@ public class KVMStorageProcessor implements StorageProcessor {
primaryPool, cmd.getWaitInMillSeconds());

DataTO data = null;
DataTO data = null;
/**
* Force the ImageFormat for RBD templates to RAW
*

@@ -370,7 +369,7 @@ public class KVMStorageProcessor implements StorageProcessor {
String srcVolumeName = srcVolumePath.substring(index + 1);
secondaryStoragePool = storagePoolMgr.getStoragePoolByURI(
secondaryStorageUrl + File.separator + volumeDir
);
);
if (!srcVolumeName.endsWith(".qcow2") && srcFormat == ImageFormat.QCOW2) {
srcVolumeName = srcVolumeName + ".qcow2";
}

@@ -1207,4 +1206,14 @@ public class KVMStorageProcessor implements StorageProcessor {
public Answer deleteSnapshot(DeleteCommand cmd) {
return new Answer(cmd);
}

@Override
public Answer introduceObject(IntroduceObjectCmd cmd) {
return new Answer(cmd, false, "not implememented yet");
}

@Override
public Answer forgetObject(ForgetObjectCmd cmd) {
return new Answer(cmd, false, "not implememented yet");
}
}
@@ -19,14 +19,9 @@

package com.cloud.resource;

import com.cloud.agent.api.Answer;
import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.agent.api.to.DataTO;
import com.cloud.agent.api.to.DiskTO;
import com.cloud.agent.api.to.NfsTO;
import com.cloud.agent.manager.SimulatorManager;
import com.cloud.storage.Storage;
import com.cloud.storage.resource.StorageProcessor;
import java.io.File;
import java.util.UUID;

import org.apache.cloudstack.storage.command.AttachAnswer;
import org.apache.cloudstack.storage.command.AttachCommand;
import org.apache.cloudstack.storage.command.CopyCmdAnswer;

@@ -36,13 +31,21 @@ import org.apache.cloudstack.storage.command.CreateObjectCommand;
import org.apache.cloudstack.storage.command.DeleteCommand;
import org.apache.cloudstack.storage.command.DettachAnswer;
import org.apache.cloudstack.storage.command.DettachCommand;
import org.apache.cloudstack.storage.command.ForgetObjectCmd;
import org.apache.cloudstack.storage.command.IntroduceObjectCmd;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
import org.apache.cloudstack.storage.to.TemplateObjectTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.log4j.Logger;

import java.io.File;
import java.util.UUID;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.agent.api.to.DataTO;
import com.cloud.agent.api.to.DiskTO;
import com.cloud.agent.api.to.NfsTO;
import com.cloud.agent.manager.SimulatorManager;
import com.cloud.storage.Storage;
import com.cloud.storage.resource.StorageProcessor;

public class SimulatorStorageProcessor implements StorageProcessor {

@@ -214,4 +217,16 @@ public class SimulatorStorageProcessor implements StorageProcessor {
public Answer deleteSnapshot(DeleteCommand cmd) {
return new Answer(cmd);
}

@Override
public Answer introduceObject(IntroduceObjectCmd cmd) {
// TODO Auto-generated method stub
return null;
}

@Override
public Answer forgetObject(ForgetObjectCmd cmd) {
// TODO Auto-generated method stub
return null;
}
}
@@ -19,12 +19,10 @@ package com.cloud.ucs.database;

import javax.ejb.Local;

import org.springframework.stereotype.Component;

import com.cloud.utils.db.DB;
import com.cloud.utils.db.GenericDaoBase;
@Local(value = { UcsBladeDao.class })
@DB(txn = false)
@DB()
public class UcsBladeDaoImpl extends GenericDaoBase<UcsBladeVO, Long> implements UcsBladeDao {

}
@@ -19,12 +19,10 @@ package com.cloud.ucs.database;

import javax.ejb.Local;

import org.springframework.stereotype.Component;

import com.cloud.utils.db.DB;
import com.cloud.utils.db.GenericDaoBase;

@Local(value = { UcsManagerDao.class })
@DB(txn = false)
@DB()
public class UcsManagerDaoImpl extends GenericDaoBase<UcsManagerVO, Long> implements UcsManagerDao {
}
@@ -32,7 +32,7 @@
import com.cloud.utils.db.SearchCriteria.Op;

@Component
@Local(value=LegacyZoneDao.class) @DB(txn=false)
@Local(value=LegacyZoneDao.class) @DB
public class LegacyZoneDaoImpl extends GenericDaoBase<LegacyZoneVO, Long> implements LegacyZoneDao {
protected static final Logger s_logger = Logger.getLogger(LegacyZoneDaoImpl.class);
@@ -32,7 +32,7 @@ import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Op;

@Component
@Local(value=VmwareDatacenterDao.class) @DB(txn=false)
@Local(value=VmwareDatacenterDao.class) @DB
public class VmwareDatacenterDaoImpl extends GenericDaoBase<VmwareDatacenterVO, Long> implements VmwareDatacenterDao {
protected static final Logger s_logger = Logger.getLogger(VmwareDatacenterDaoImpl.class);
@@ -17,6 +17,7 @@
package com.cloud.hypervisor.vmware.manager;

import com.cloud.agent.api.Command;
import com.cloud.hypervisor.vmware.mo.DatastoreMO;
import com.cloud.hypervisor.vmware.mo.VmwareHypervisorHost;
import com.cloud.hypervisor.vmware.util.VmwareContext;
import com.vmware.vim25.ManagedObjectReference;

@@ -28,7 +29,8 @@ public interface VmwareHostService {

String getWorkerName(VmwareContext context, Command cmd, int workerSequence);

ManagedObjectReference handleDatastoreAndVmdkAttach(Command cmd, String iqn, String storageHost, int storagePort,
String initiatorUsername, String initiatorPassword, String targetUsername, String targetPassword) throws Exception;
ManagedObjectReference getVmfsDatastore(VmwareHypervisorHost hyperHost, String datastoreName, String storageIpAddress, int storagePortNumber,
String iqn, String initiatorChapName, String initiatorChapSecret, String mutualChapName, String mutualChapSecret) throws Exception;
void createVmdk(Command cmd, DatastoreMO dsMo, String volumeDatastorePath, Long volumeSize) throws Exception;
void handleDatastoreAndVmdkDetach(String iqn, String storageHost, int storagePort) throws Exception;
}
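The interface above replaces handleDatastoreAndVmdkAttach(...) with the narrower getVmfsDatastore(...) and createVmdk(...) pair. A rough caller-side sketch of how the two combine, mirroring the attach path shown later in this diff; the parameter values are placeholders, and getDatastoreName()/trimIqn() are the VmwareResource helpers referenced elsewhere in the change.

    // Sketch only: mounts (or finds) the iSCSI-backed VMFS datastore, then lays down
    // the backing VMDK once if it is not there yet.
    ManagedObjectReference prepareManagedDatastore(VmwareHostService hostService,
            VmwareHypervisorHost hyperHost, VmwareContext context, Command cmd,
            String iScsiName, String storageHost, int storagePort,
            String initChapName, String initChapSecret,
            String mutualChapName, String mutualChapSecret, Long volumeSize) throws Exception {
        ManagedObjectReference morDs = hostService.getVmfsDatastore(hyperHost,
                VmwareResource.getDatastoreName(iScsiName), storageHost, storagePort,
                VmwareResource.trimIqn(iScsiName),
                initChapName, initChapSecret, mutualChapName, mutualChapSecret);

        DatastoreMO dsMo = new DatastoreMO(context, morDs);
        String volumeDatastorePath = String.format("[%s] %s.vmdk", dsMo.getName(), dsMo.getName());

        if (!dsMo.fileExists(volumeDatastorePath)) {
            hostService.createVmdk(cmd, dsMo, VmwareResource.getDatastoreName(iScsiName), volumeSize);
        }
        return morDs;
    }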
@ -2629,6 +2629,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
DatacenterMO dcMo = new DatacenterMO(hyperHost.getContext(), hyperHost.getHyperHostDatacenter());
|
||||
VirtualMachineDiskInfoBuilder diskInfoBuilder = null;
|
||||
VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(vmInternalCSName);
|
||||
boolean hasSnapshot = false;
|
||||
if (vmMo != null) {
|
||||
s_logger.info("VM " + vmInternalCSName + " already exists, tear down devices for reconfiguration");
|
||||
if (getVmState(vmMo) != State.Stopped)
|
||||
|
|
@ -2636,7 +2637,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
|
||||
// retrieve disk information before we tear down
|
||||
diskInfoBuilder = vmMo.getDiskInfoBuilder();
|
||||
vmMo.tearDownDevices(new Class<?>[] { VirtualDisk.class, VirtualEthernetCard.class });
|
||||
hasSnapshot = vmMo.hasSnapshot();
|
||||
if(!hasSnapshot)
|
||||
vmMo.tearDownDevices(new Class<?>[] { VirtualDisk.class, VirtualEthernetCard.class });
|
||||
else
|
||||
vmMo.tearDownDevices(new Class<?>[] { VirtualEthernetCard.class });
|
||||
vmMo.ensureScsiDeviceController();
|
||||
} else {
|
||||
ManagedObjectReference morDc = hyperHost.getHyperHostDatacenter();
|
||||
|
|
@ -2654,7 +2659,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
vmMo.safePowerOff(_shutdown_waitMs);
|
||||
|
||||
diskInfoBuilder = vmMo.getDiskInfoBuilder();
|
||||
vmMo.tearDownDevices(new Class<?>[] { VirtualDisk.class, VirtualEthernetCard.class });
|
||||
hasSnapshot = vmMo.hasSnapshot();
|
||||
if(!hasSnapshot)
|
||||
vmMo.tearDownDevices(new Class<?>[] { VirtualDisk.class, VirtualEthernetCard.class });
|
||||
else
|
||||
vmMo.tearDownDevices(new Class<?>[] { VirtualEthernetCard.class });
|
||||
vmMo.ensureScsiDeviceController();
|
||||
} else {
|
||||
int ramMb = (int) (vmSpec.getMinRam() / (1024 * 1024));
|
||||
|
|
@ -2810,37 +2819,45 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
//
|
||||
DiskTO[] sortedDisks = sortVolumesByDeviceId(disks);
|
||||
for (DiskTO vol : sortedDisks) {
|
||||
deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec();
|
||||
|
||||
if (vol.getType() == Volume.Type.ISO)
|
||||
continue;
|
||||
|
||||
|
||||
VirtualMachineDiskInfo matchingExistingDisk = getMatchingExistingDisk(diskInfoBuilder, vol);
|
||||
controllerKey = getDiskController(matchingExistingDisk, vol, vmSpec, ideControllerKey, scsiControllerKey);
|
||||
|
||||
VolumeObjectTO volumeTO = (VolumeObjectTO)vol.getData();
|
||||
PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)volumeTO.getDataStore();
|
||||
Pair<ManagedObjectReference, DatastoreMO> volumeDsDetails = dataStoresDetails.get(primaryStore.getUuid());
|
||||
assert (volumeDsDetails != null);
|
||||
VirtualDevice device;
|
||||
|
||||
String[] diskChain = syncDiskChain(dcMo, vmMo, vmSpec,
|
||||
vol, matchingExistingDisk,
|
||||
dataStoresDetails);
|
||||
if(controllerKey == scsiControllerKey && VmwareHelper.isReservedScsiDeviceNumber(scsiUnitNumber))
|
||||
scsiUnitNumber++;
|
||||
device = VmwareHelper.prepareDiskDevice(vmMo, null, controllerKey,
|
||||
diskChain,
|
||||
volumeDsDetails.first(),
|
||||
(controllerKey == ideControllerKey) ? ideUnitNumber++ : scsiUnitNumber++, i + 1);
|
||||
|
||||
deviceConfigSpecArray[i].setDevice(device);
|
||||
deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD);
|
||||
|
||||
if(s_logger.isDebugEnabled())
|
||||
s_logger.debug("Prepare volume at new device " + _gson.toJson(device));
|
||||
|
||||
i++;
|
||||
if(!hasSnapshot) {
|
||||
deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec();
|
||||
|
||||
VolumeObjectTO volumeTO = (VolumeObjectTO)vol.getData();
|
||||
PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)volumeTO.getDataStore();
|
||||
Pair<ManagedObjectReference, DatastoreMO> volumeDsDetails = dataStoresDetails.get(primaryStore.getUuid());
|
||||
assert (volumeDsDetails != null);
|
||||
|
||||
String[] diskChain = syncDiskChain(dcMo, vmMo, vmSpec,
|
||||
vol, matchingExistingDisk,
|
||||
dataStoresDetails);
|
||||
if(controllerKey == scsiControllerKey && VmwareHelper.isReservedScsiDeviceNumber(scsiUnitNumber))
|
||||
scsiUnitNumber++;
|
||||
VirtualDevice device = VmwareHelper.prepareDiskDevice(vmMo, null, controllerKey,
|
||||
diskChain,
|
||||
volumeDsDetails.first(),
|
||||
(controllerKey == ideControllerKey) ? ideUnitNumber++ : scsiUnitNumber++, i + 1);
|
||||
|
||||
deviceConfigSpecArray[i].setDevice(device);
|
||||
deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD);
|
||||
|
||||
if(s_logger.isDebugEnabled())
|
||||
s_logger.debug("Prepare volume at new device " + _gson.toJson(device));
|
||||
|
||||
i++;
|
||||
} else {
|
||||
if(controllerKey == scsiControllerKey && VmwareHelper.isReservedScsiDeviceNumber(scsiUnitNumber))
|
||||
scsiUnitNumber++;
|
||||
if(controllerKey == ideControllerKey)
|
||||
ideUnitNumber++;
|
||||
else
|
||||
scsiUnitNumber++;
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
|
|
@ -2887,7 +2904,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
nicCount++;
|
||||
}
|
||||
|
||||
vmConfigSpec.getDeviceChange().addAll(Arrays.asList(deviceConfigSpecArray));
|
||||
for(int j = 0; j < i; j++)
|
||||
vmConfigSpec.getDeviceChange().add(deviceConfigSpecArray[j]);
|
||||
|
||||
//
|
||||
// Setup VM options
|
||||
|
|
@@ -4439,7 +4457,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
return str.replace('/', '-');
}

private String trimIqn(String iqn) {
public static String trimIqn(String iqn) {
String[] tmp = iqn.split("/");

if (tmp.length != 3) {

@@ -4454,36 +4472,23 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
}

@Override
public ManagedObjectReference handleDatastoreAndVmdkAttach(Command cmd, String iqn, String storageHost, int storagePort,
String initiatorUsername, String initiatorPassword, String targetUsername, String targetPassword) throws Exception {
public void createVmdk(Command cmd, DatastoreMO dsMo, String vmdkDatastorePath, Long volumeSize) throws Exception {
VmwareContext context = getServiceContext();
VmwareHypervisorHost hyperHost = getHyperHost(context);

ManagedObjectReference morDs = createVmfsDatastore(hyperHost, getDatastoreName(iqn),
storageHost, storagePort, trimIqn(iqn),
initiatorUsername, initiatorPassword,
targetUsername, targetPassword);
String dummyVmName = getWorkerName(context, cmd, 0);

DatastoreMO dsMo = new DatastoreMO(context, morDs);
VirtualMachineMO vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, dummyVmName);

String volumeDatastorePath = String.format("[%s] %s.vmdk", dsMo.getName(), dsMo.getName());

if (!dsMo.fileExists(volumeDatastorePath)) {
String dummyVmName = getWorkerName(context, cmd, 0);

VirtualMachineMO vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, dummyVmName);

if (vmMo == null) {
throw new Exception("Unable to create a dummy VM for volume creation");
}

vmMo.createDisk(volumeDatastorePath, getMBsFromBytes(dsMo.getSummary().getFreeSpace()),
morDs, vmMo.getScsiDeviceControllerKey());
vmMo.detachDisk(volumeDatastorePath, false);
vmMo.destroy();
if (vmMo == null) {
throw new Exception("Unable to create a dummy VM for volume creation");
}

return morDs;
Long volumeSizeToUse = volumeSize < dsMo.getSummary().getFreeSpace() ? volumeSize : dsMo.getSummary().getFreeSpace();

vmMo.createDisk(vmdkDatastorePath, getMBsFromBytes(volumeSizeToUse), dsMo.getMor(), vmMo.getScsiDeviceControllerKey());
vmMo.detachDisk(vmdkDatastorePath, false);
vmMo.destroy();
}

@Override

@@ -4516,9 +4521,16 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
ManagedObjectReference morDs = null;

if (cmd.getAttach() && cmd.isManaged()) {
morDs = handleDatastoreAndVmdkAttach(cmd, cmd.get_iScsiName(), cmd.getStorageHost(), cmd.getStoragePort(),
cmd.getChapInitiatorUsername(), cmd.getChapInitiatorPassword(),
cmd.getChapTargetUsername(), cmd.getChapTargetPassword());
morDs = getVmfsDatastore(hyperHost, getDatastoreName(cmd.get_iScsiName()), cmd.getStorageHost(), cmd.getStoragePort(), trimIqn(cmd.get_iScsiName()),
cmd.getChapInitiatorUsername(), cmd.getChapInitiatorPassword(), cmd.getChapTargetUsername(), cmd.getChapTargetPassword());

DatastoreMO dsMo = new DatastoreMO(getServiceContext(), morDs);

String volumeDatastorePath = String.format("[%s] %s.vmdk", dsMo.getName(), dsMo.getName());

if (!dsMo.fileExists(volumeDatastorePath)) {
createVmdk(cmd, dsMo, VmwareResource.getDatastoreName(cmd.get_iScsiName()), cmd.getVolumeSize());
}
}
else {
morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, cmd.getPoolUuid());

@@ -4531,10 +4543,18 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
}

DatastoreMO dsMo = new DatastoreMO(getServiceContext(), morDs);
VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dsMo.getOwnerDatacenter().first(), cmd.getVmName(),
dsMo, cmd.getVolumePath());

String datastoreVolumePath = dsMo.searchFileInSubFolders(cmd.getVolumePath() + ".vmdk", true);

String datastoreVolumePath = null;

if (cmd.isManaged()) {
datastoreVolumePath = dsMo.getDatastorePath(dsMo.getName() + ".vmdk");
}
else {
VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dsMo.getOwnerDatacenter().first(), cmd.getVmName(), dsMo, cmd.getVolumePath());

datastoreVolumePath = dsMo.searchFileInSubFolders(cmd.getVolumePath() + ".vmdk", true);
}

assert (datastoreVolumePath != null) : "Virtual disk file must exist in specified datastore for attach/detach operations.";
if (datastoreVolumePath == null) {
throw new CloudRuntimeException("Unable to find file " + cmd.getVolumePath() + ".vmdk in datastore " + dsMo.getName());

@@ -4687,7 +4707,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
}
}

private ManagedObjectReference createVmfsDatastore(VmwareHypervisorHost hyperHost, String datastoreName, String storageIpAddress,
public ManagedObjectReference getVmfsDatastore(VmwareHypervisorHost hyperHost, String datastoreName, String storageIpAddress,
int storagePortNumber, String iqn, String chapName, String chapSecret, String mutualChapName, String mutualChapSecret) throws Exception {
VmwareContext context = getServiceContext();
ManagedObjectReference morCluster = hyperHost.getHyperHostCluster();

@@ -5410,7 +5430,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
// tear down all devices first before we destroy the VM to avoid accidently delete disk backing files
if (getVmState(vmMo) != State.Stopped)
vmMo.safePowerOff(_shutdown_waitMs);
vmMo.tearDownDevices(new Class<?>[] { VirtualDisk.class, VirtualEthernetCard.class });
vmMo.tearDownDevices(new Class<?>[] { /* VirtualDisk.class, */ VirtualEthernetCard.class });
vmMo.destroy();

for (NetworkDetails netDetails : networks) {
@@ -30,7 +30,7 @@ import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Op;

@Component
@Local(value=CiscoNexusVSMDeviceDao.class) @DB(txn=false)
@Local(value=CiscoNexusVSMDeviceDao.class) @DB
public class CiscoNexusVSMDeviceDaoImpl extends GenericDaoBase<CiscoNexusVSMDeviceVO, Long> implements CiscoNexusVSMDeviceDao {
protected static final Logger s_logger = Logger.getLogger(CiscoNexusVSMDeviceDaoImpl.class);
final SearchBuilder<CiscoNexusVSMDeviceVO> mgmtVlanIdSearch;
@@ -37,6 +37,7 @@ import com.cloud.hypervisor.vmware.manager.VmwareStorageManager;
import com.cloud.hypervisor.vmware.manager.VmwareStorageManagerImpl;
import com.cloud.hypervisor.vmware.manager.VmwareStorageMount;
import com.cloud.hypervisor.vmware.mo.ClusterMO;
import com.cloud.hypervisor.vmware.mo.DatastoreMO;
import com.cloud.hypervisor.vmware.mo.HostMO;
import com.cloud.hypervisor.vmware.mo.VmwareHostType;
import com.cloud.hypervisor.vmware.mo.VmwareHypervisorHost;

@@ -347,8 +348,12 @@ public class VmwareSecondaryStorageResourceHandler implements SecondaryStorageRe
return true;
}

public ManagedObjectReference handleDatastoreAndVmdkAttach(Command cmd, String iqn, String storageHost, int storagePort,
String initiatorUsername, String initiatorPassword, String targetUsername, String targetPassword) throws Exception {
public ManagedObjectReference getVmfsDatastore(VmwareHypervisorHost hyperHost, String datastoreName, String storageIpAddress, int storagePortNumber,
String iqn, String initiatorChapName, String initiatorChapSecret, String mutualChapName, String mutualChapSecret) throws Exception {
throw new OperationNotSupportedException();
}

public void createVmdk(Command cmd, DatastoreMO dsMo, String volumeDatastorePath, Long volumeSize) throws Exception {
throw new OperationNotSupportedException();
}
@ -26,22 +26,6 @@ import java.util.HashMap;
|
|||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.UUID;
|
||||
|
||||
import org.apache.commons.lang.StringUtils;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.google.gson.Gson;
|
||||
import com.vmware.vim25.ManagedObjectReference;
|
||||
import com.vmware.vim25.VirtualDeviceConfigSpec;
|
||||
import com.vmware.vim25.VirtualDeviceConfigSpecOperation;
|
||||
import com.vmware.vim25.VirtualDisk;
|
||||
import com.vmware.vim25.VirtualEthernetCard;
|
||||
import com.vmware.vim25.VirtualLsiLogicController;
|
||||
import com.vmware.vim25.VirtualMachineConfigSpec;
|
||||
import com.vmware.vim25.VirtualMachineFileInfo;
|
||||
import com.vmware.vim25.VirtualMachineGuestOsIdentifier;
|
||||
import com.vmware.vim25.VirtualSCSISharing;
|
||||
|
||||
import org.apache.cloudstack.storage.command.AttachAnswer;
|
||||
import org.apache.cloudstack.storage.command.AttachCommand;
|
||||
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
|
||||
|
|
@ -50,10 +34,14 @@ import org.apache.cloudstack.storage.command.CreateObjectAnswer;
|
|||
import org.apache.cloudstack.storage.command.CreateObjectCommand;
|
||||
import org.apache.cloudstack.storage.command.DeleteCommand;
|
||||
import org.apache.cloudstack.storage.command.DettachCommand;
|
||||
import org.apache.cloudstack.storage.command.ForgetObjectCmd;
|
||||
import org.apache.cloudstack.storage.command.IntroduceObjectCmd;
|
||||
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
|
||||
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
|
||||
import org.apache.cloudstack.storage.to.TemplateObjectTO;
|
||||
import org.apache.cloudstack.storage.to.VolumeObjectTO;
|
||||
import org.apache.commons.lang.StringUtils;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.Command;
|
||||
|
|
@ -88,10 +76,13 @@ import com.cloud.utils.Pair;
|
|||
import com.cloud.utils.Ternary;
|
||||
import com.cloud.utils.script.Script;
|
||||
import com.cloud.vm.VirtualMachine.State;
|
||||
import com.google.gson.Gson;
|
||||
import com.vmware.vim25.ManagedObjectReference;
|
||||
import com.vmware.vim25.VirtualDisk;
|
||||
|
||||
public class VmwareStorageProcessor implements StorageProcessor {
|
||||
private static final Logger s_logger = Logger.getLogger(VmwareStorageProcessor.class);
|
||||
|
||||
|
||||
private VmwareHostService hostService;
|
||||
private boolean _fullCloneFlag;
|
||||
private VmwareStorageMount mountService;
|
||||
|
|
@ -128,9 +119,9 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
|
||||
private void copyTemplateFromSecondaryToPrimary(VmwareHypervisorHost hyperHost, DatastoreMO datastoreMo, String secondaryStorageUrl,
|
||||
String templatePathAtSecondaryStorage, String templateName, String templateUuid) throws Exception {
|
||||
String templatePathAtSecondaryStorage, String templateName, String templateUuid) throws Exception {
|
||||
|
||||
s_logger.info("Executing copyTemplateFromSecondaryToPrimary. secondaryStorage: "
|
||||
+ secondaryStorageUrl + ", templatePathAtSecondaryStorage: " + templatePathAtSecondaryStorage
|
||||
|
|
@ -140,9 +131,9 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
s_logger.info("Secondary storage mount point: " + secondaryMountPoint);
|
||||
|
||||
String srcOVAFileName = VmwareStorageLayoutHelper.getTemplateOnSecStorageFilePath(
|
||||
secondaryMountPoint, templatePathAtSecondaryStorage,
|
||||
templateName, ImageFormat.OVA.getFileExtension());
|
||||
|
||||
secondaryMountPoint, templatePathAtSecondaryStorage,
|
||||
templateName, ImageFormat.OVA.getFileExtension());
|
||||
|
||||
String srcFileName = getOVFFilePath(srcOVAFileName);
|
||||
if(srcFileName == null) {
|
||||
Script command = new Script("tar", 0, s_logger);
|
||||
|
|
@ -178,8 +169,8 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
}
|
||||
|
||||
if(vmMo.createSnapshot("cloud.template.base", "Base snapshot", false, false)) {
|
||||
// the same template may be deployed with multiple copies at per-datastore per-host basis,
|
||||
// save the original template name from CloudStack DB as the UUID to associate them.
|
||||
// the same template may be deployed with multiple copies at per-datastore per-host basis,
|
||||
// save the original template name from CloudStack DB as the UUID to associate them.
|
||||
vmMo.setCustomFieldValue(CustomFieldConstants.CLOUD_UUID, templateName);
|
||||
vmMo.markAsTemplate();
|
||||
} else {
|
||||
|
|
@ -197,7 +188,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
DataStoreTO srcStore = srcData.getDataStore();
|
||||
if (!(srcStore instanceof NfsTO)) {
|
||||
return new CopyCmdAnswer("unsupported protocol");
|
||||
}
|
||||
}
|
||||
NfsTO nfsImageStore = (NfsTO)srcStore;
|
||||
DataTO destData = cmd.getDestTO();
|
||||
DataStoreTO destStore = destData.getDataStore();
|
||||
|
|
@ -206,9 +197,9 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
assert (secondaryStorageUrl != null);
|
||||
|
||||
String templateUrl = secondaryStorageUrl + "/" + srcData.getPath();
|
||||
|
||||
|
||||
Pair<String, String> templateInfo = VmwareStorageLayoutHelper.decodeTemplateRelativePathAndNameFromUrl(
|
||||
secondaryStorageUrl, templateUrl, template.getName());
|
||||
secondaryStorageUrl, templateUrl, template.getName());
|
||||
|
||||
VmwareContext context = hostService.getServiceContext(cmd);
|
||||
try {
|
||||
|
|
@ -246,7 +237,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
return new CopyCmdAnswer(msg);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private boolean createVMLinkedClone(VirtualMachineMO vmTemplate, DatacenterMO dcMo, DatastoreMO dsMo,
|
||||
String vmdkName, ManagedObjectReference morDatastore, ManagedObjectReference morPool) throws Exception {
|
||||
|
||||
|
|
@ -265,16 +256,16 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
}
|
||||
|
||||
s_logger.info("Move volume out of volume-wrapper VM ");
|
||||
String[] vmwareLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo,
|
||||
vmdkName, vmdkName, VmwareStorageLayoutType.VMWARE, true);
|
||||
String[] legacyCloudStackLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo,
|
||||
vmdkName, vmdkName, VmwareStorageLayoutType.CLOUDSTACK_LEGACY, true);
|
||||
|
||||
String[] vmwareLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo,
|
||||
vmdkName, vmdkName, VmwareStorageLayoutType.VMWARE, true);
|
||||
String[] legacyCloudStackLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo,
|
||||
vmdkName, vmdkName, VmwareStorageLayoutType.CLOUDSTACK_LEGACY, true);
|
||||
|
||||
dsMo.moveDatastoreFile(vmwareLayoutFilePair[0],
|
||||
dcMo.getMor(), dsMo.getMor(),
|
||||
legacyCloudStackLayoutFilePair[0],
|
||||
dcMo.getMor(), true);
|
||||
|
||||
|
||||
dsMo.moveDatastoreFile(vmwareLayoutFilePair[1],
|
||||
dcMo.getMor(), dsMo.getMor(),
|
||||
legacyCloudStackLayoutFilePair[1],
|
||||
|
|
@ -292,18 +283,18 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
s_logger.error(msg);
|
||||
throw new Exception(msg);
|
||||
}
|
||||
|
||||
|
||||
s_logger.info("Move volume out of volume-wrapper VM ");
|
||||
String[] vmwareLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo,
|
||||
vmdkName, vmdkName, VmwareStorageLayoutType.VMWARE, false);
|
||||
String[] legacyCloudStackLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo,
|
||||
vmdkName, vmdkName, VmwareStorageLayoutType.CLOUDSTACK_LEGACY, false);
|
||||
|
||||
String[] vmwareLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo,
|
||||
vmdkName, vmdkName, VmwareStorageLayoutType.VMWARE, false);
|
||||
String[] legacyCloudStackLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo,
|
||||
vmdkName, vmdkName, VmwareStorageLayoutType.CLOUDSTACK_LEGACY, false);
|
||||
|
||||
dsMo.moveDatastoreFile(vmwareLayoutFilePair[0],
|
||||
dcMo.getMor(), dsMo.getMor(),
|
||||
legacyCloudStackLayoutFilePair[0],
|
||||
dcMo.getMor(), true);
|
||||
|
||||
|
||||
dsMo.moveDatastoreFile(vmwareLayoutFilePair[1],
|
||||
dcMo.getMor(), dsMo.getMor(),
|
||||
legacyCloudStackLayoutFilePair[1],
|
||||
|
|
@ -343,17 +334,17 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
throw new Exception("Unable to create a dummy VM for volume creation");
|
||||
}
|
||||
|
||||
String vmdkFilePair[] = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, null, vmdkName,
|
||||
VmwareStorageLayoutType.CLOUDSTACK_LEGACY,
|
||||
true // we only use the first file in the pair, linked or not will not matter
|
||||
);
|
||||
String vmdkFilePair[] = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, null, vmdkName,
|
||||
VmwareStorageLayoutType.CLOUDSTACK_LEGACY,
|
||||
true // we only use the first file in the pair, linked or not will not matter
|
||||
);
|
||||
String volumeDatastorePath = vmdkFilePair[0];
|
||||
synchronized (this) {
|
||||
s_logger.info("Delete file if exists in datastore to clear the way for creating the volume. file: " + volumeDatastorePath);
|
||||
VmwareStorageLayoutHelper.deleteVolumeVmdkFiles(dsMo, vmdkName, dcMo);
|
||||
vmMo.createDisk(volumeDatastorePath, (int) (volume.getSize() / (1024L * 1024L)), morDatastore, -1);
|
||||
vmMo.detachDisk(volumeDatastorePath, false);
|
||||
}
|
||||
}
|
||||
|
||||
VolumeObjectTO newVol = new VolumeObjectTO();
|
||||
newVol.setPath(vmdkName);
|
||||
|
|
@ -506,7 +497,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
|
||||
try {
|
||||
ManagedObjectReference morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, poolId);
|
||||
|
||||
|
||||
if (morDs == null) {
|
||||
String msg = "Unable to find volumes's storage pool for copy volume operation";
|
||||
s_logger.error(msg);
|
||||
|
|
@ -518,7 +509,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
// create a dummy worker vm for attaching the volume
|
||||
DatastoreMO dsMo = new DatastoreMO(hyperHost.getContext(), morDs);
|
||||
workerVm = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, workerVmName);
|
||||
|
||||
|
||||
if (workerVm == null) {
|
||||
String msg = "Unable to create worker VM to execute CopyVolumeCommand";
|
||||
s_logger.error(msg);
|
||||
|
|
@ -657,7 +648,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
Pair<VirtualMachineMO, String[]> cloneResult = vmMo.cloneFromCurrentSnapshot(workerVmName, 0, 4, volumeDeviceInfo.second(),
|
||||
VmwareHelper.getDiskDeviceDatastore(volumeDeviceInfo.first()));
|
||||
clonedVm = cloneResult.first();
|
||||
|
||||
|
||||
clonedVm.exportVm(secondaryMountPoint + "/" + installPath, templateUniqueName, true, false);
|
||||
|
||||
long physicalSize = new File(installFullPath + "/" + templateUniqueName + ".ova").length();
|
||||
|
|
@ -960,7 +951,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
throw new Exception("unable to prepare snapshot backup directory");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
VirtualMachineMO clonedVm = null;
|
||||
try {
|
||||
|
|
@ -974,7 +965,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
|
||||
// 4 MB is the minimum requirement for VM memory in VMware
|
||||
Pair<VirtualMachineMO, String[]> cloneResult = vmMo.cloneFromCurrentSnapshot(workerVmName, 0, 4, volumeDeviceInfo.second(),
|
||||
VmwareHelper.getDiskDeviceDatastore(volumeDeviceInfo.first()));
|
||||
VmwareHelper.getDiskDeviceDatastore(volumeDeviceInfo.first()));
|
||||
clonedVm = cloneResult.first();
|
||||
String disks[] = cloneResult.second();
|
||||
|
||||
|
|
@ -998,7 +989,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
installPath, backupUuid, workerVmName);
|
||||
return new Ternary<String, String, String[]>(backupUuid + "/" + backupUuid, snapshotInfo.first(), snapshotInfo.second());
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public Answer backupSnapshot(CopyCommand cmd) {
|
||||
SnapshotObjectTO srcSnapshot = (SnapshotObjectTO)cmd.getSrcTO();
|
||||
|
|
@ -1025,7 +1016,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
String details = null;
|
||||
boolean success = false;
|
||||
String snapshotBackupUuid = null;
|
||||
|
||||
|
||||
boolean hasOwnerVm = false;
|
||||
Ternary<String, String, String[]> backupResult = null;
|
||||
|
||||
|
|
@ -1037,7 +1028,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, primaryStore.getUuid());
|
||||
|
||||
CopyCmdAnswer answer = null;
|
||||
|
||||
|
||||
try {
|
||||
vmMo = hyperHost.findVmOnHyperHost(vmName);
|
||||
if (vmMo == null) {
|
||||
|
|
@ -1050,7 +1041,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
dsMo = new DatastoreMO(hyperHost.getContext(), morDs);
|
||||
|
||||
workerVMName = hostService.getWorkerName(context, cmd, 0);
|
||||
|
||||
|
||||
vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, workerVMName);
|
||||
|
||||
if (vmMo == null) {
|
||||
|
|
@ -1062,12 +1053,12 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
String datastoreVolumePath = dsMo.getDatastorePath(volumePath + ".vmdk");
|
||||
vmMo.attachDisk(new String[] { datastoreVolumePath }, morDs);
|
||||
} else {
|
||||
s_logger.info("Using owner VM " + vmName + " for snapshot operation");
|
||||
hasOwnerVm = true;
|
||||
s_logger.info("Using owner VM " + vmName + " for snapshot operation");
|
||||
hasOwnerVm = true;
|
||||
}
|
||||
} else {
|
||||
s_logger.info("Using owner VM " + vmName + " for snapshot operation");
|
||||
hasOwnerVm = true;
|
||||
s_logger.info("Using owner VM " + vmName + " for snapshot operation");
|
||||
hasOwnerVm = true;
|
||||
}
|
||||
|
||||
if (!vmMo.createSnapshot(snapshotUuid, "Snapshot taken for " + srcSnapshot.getName(), false, false)) {
|
||||
|
|
@ -1093,52 +1084,52 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
ManagedObjectReference snapshotMor = vmMo.getSnapshotMor(snapshotUuid);
|
||||
if (snapshotMor != null) {
|
||||
vmMo.removeSnapshot(snapshotUuid, false);
|
||||
|
||||
|
||||
// Snapshot operation may cause disk consolidation in VMware, when this happens
|
||||
// we need to update CloudStack DB
|
||||
//
|
||||
// TODO: this post operation fixup is not atomic and not safe when management server stops
|
||||
// in the middle
|
||||
if(backupResult != null && hasOwnerVm) {
|
||||
s_logger.info("Check if we have disk consolidation after snapshot operation");
|
||||
|
||||
boolean chainConsolidated = false;
|
||||
for(String vmdkDsFilePath : backupResult.third()) {
|
||||
s_logger.info("Validate disk chain file:" + vmdkDsFilePath);
|
||||
|
||||
if(vmMo.getDiskDevice(vmdkDsFilePath, false) == null) {
|
||||
s_logger.info("" + vmdkDsFilePath + " no longer exists, consolidation detected");
|
||||
chainConsolidated = true;
|
||||
break;
|
||||
} else {
|
||||
s_logger.info("" + vmdkDsFilePath + " is found still in chain");
|
||||
}
|
||||
}
|
||||
|
||||
if(chainConsolidated) {
|
||||
String topVmdkFilePath = null;
|
||||
try {
|
||||
topVmdkFilePath = vmMo.getDiskCurrentTopBackingFileInChain(backupResult.second());
|
||||
} catch(Exception e) {
|
||||
s_logger.error("Unexpected exception", e);
|
||||
}
|
||||
|
||||
s_logger.info("Disk has been consolidated, top VMDK is now: " + topVmdkFilePath);
|
||||
if(topVmdkFilePath != null) {
|
||||
DatastoreFile file = new DatastoreFile(topVmdkFilePath);
|
||||
|
||||
SnapshotObjectTO snapshotInfo = (SnapshotObjectTO)answer.getNewData();
|
||||
VolumeObjectTO vol = new VolumeObjectTO();
|
||||
vol.setUuid(srcSnapshot.getVolume().getUuid());
|
||||
vol.setPath(file.getFileBaseName());
|
||||
snapshotInfo.setVolume(vol);
|
||||
} else {
|
||||
s_logger.error("Disk has been consolidated, but top VMDK is not found ?!");
|
||||
}
|
||||
}
|
||||
s_logger.info("Check if we have disk consolidation after snapshot operation");
|
||||
|
||||
boolean chainConsolidated = false;
|
||||
for(String vmdkDsFilePath : backupResult.third()) {
|
||||
s_logger.info("Validate disk chain file:" + vmdkDsFilePath);
|
||||
|
||||
if(vmMo.getDiskDevice(vmdkDsFilePath, false) == null) {
|
||||
s_logger.info("" + vmdkDsFilePath + " no longer exists, consolidation detected");
|
||||
chainConsolidated = true;
|
||||
break;
|
||||
} else {
|
||||
s_logger.info("" + vmdkDsFilePath + " is found still in chain");
|
||||
}
|
||||
}
|
||||
|
||||
if(chainConsolidated) {
|
||||
String topVmdkFilePath = null;
|
||||
try {
|
||||
topVmdkFilePath = vmMo.getDiskCurrentTopBackingFileInChain(backupResult.second());
|
||||
} catch(Exception e) {
|
||||
s_logger.error("Unexpected exception", e);
|
||||
}
|
||||
|
||||
s_logger.info("Disk has been consolidated, top VMDK is now: " + topVmdkFilePath);
|
||||
if(topVmdkFilePath != null) {
|
||||
DatastoreFile file = new DatastoreFile(topVmdkFilePath);
|
||||
|
||||
SnapshotObjectTO snapshotInfo = (SnapshotObjectTO)answer.getNewData();
|
||||
VolumeObjectTO vol = new VolumeObjectTO();
|
||||
vol.setUuid(srcSnapshot.getVolume().getUuid());
|
||||
vol.setPath(file.getFileBaseName());
|
||||
snapshotInfo.setVolume(vol);
|
||||
} else {
|
||||
s_logger.error("Disk has been consolidated, but top VMDK is not found ?!");
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
s_logger.error("Can not find the snapshot we just used ?!");
|
||||
s_logger.error("Can not find the snapshot we just used ?!");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1152,7 +1143,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
s_logger.warn("Failed to destroy worker VM: " + workerVMName);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
return answer;
|
||||
} catch (Throwable e) {
|
||||
if (e instanceof RemoteException) {
|
||||
|
|
@@ -1200,12 +1191,20 @@ public class VmwareStorageProcessor implements StorageProcessor {
ManagedObjectReference morDs = null;

if (isAttach && isManaged) {
morDs = hostService.handleDatastoreAndVmdkAttach(cmd, iScsiName, storageHost, storagePort,
initiatorUsername, initiatorPassword, targetUsername, targetPassword);
morDs = hostService.getVmfsDatastore(hyperHost, VmwareResource.getDatastoreName(iScsiName), storageHost, storagePort,
VmwareResource.trimIqn(iScsiName), initiatorUsername, initiatorPassword, targetUsername, targetPassword);

DatastoreMO dsMo = new DatastoreMO(hostService.getServiceContext(null), morDs);

String volumeDatastorePath = String.format("[%s] %s.vmdk", dsMo.getName(), dsMo.getName());

if (!dsMo.fileExists(volumeDatastorePath)) {
hostService.createVmdk(cmd, dsMo, VmwareResource.getDatastoreName(iScsiName), volumeTO.getSize());
}
}
else {
morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, isManaged ? VmwareResource.getDatastoreName(iScsiName) : primaryStore.getUuid());
}
}

if (morDs == null) {
String msg = "Unable to find the mounted datastore to execute AttachVolumeCommand, vmName: " + vmName;
@ -1216,31 +1215,42 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
DatastoreMO dsMo = new DatastoreMO(this.hostService.getServiceContext(null), morDs);
|
||||
String datastoreVolumePath;
|
||||
|
||||
if(isAttach) {
|
||||
if(!isManaged)
|
||||
datastoreVolumePath = VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dsMo.getOwnerDatacenter().first(), vmName,
|
||||
dsMo, volumeTO.getPath());
|
||||
else
|
||||
datastoreVolumePath = dsMo.getDatastorePath(dsMo.getName() + ".vmdk");
|
||||
} else {
|
||||
datastoreVolumePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(dsMo, volumeTO.getPath() + ".vmdk");
|
||||
if(!dsMo.fileExists(datastoreVolumePath))
|
||||
datastoreVolumePath = VmwareStorageLayoutHelper.getVmwareDatastorePathFromVmdkFileName(dsMo, vmName, volumeTO.getPath() + ".vmdk");
|
||||
if (isAttach) {
|
||||
if (isManaged) {
|
||||
datastoreVolumePath = dsMo.getDatastorePath(dsMo.getName() + ".vmdk");
|
||||
}
|
||||
else {
|
||||
datastoreVolumePath = VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dsMo.getOwnerDatacenter().first(), vmName, dsMo, volumeTO.getPath());
|
||||
}
|
||||
}
|
||||
|
||||
else {
|
||||
if (isManaged) {
|
||||
datastoreVolumePath = dsMo.getDatastorePath(dsMo.getName() + ".vmdk");
|
||||
}
|
||||
else {
|
||||
datastoreVolumePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(dsMo, volumeTO.getPath() + ".vmdk");
|
||||
|
||||
if (!dsMo.fileExists(datastoreVolumePath)) {
|
||||
datastoreVolumePath = VmwareStorageLayoutHelper.getVmwareDatastorePathFromVmdkFileName(dsMo, vmName, volumeTO.getPath() + ".vmdk");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
disk.setVdiUuid(datastoreVolumePath);
|
||||
|
||||
AttachAnswer answer = new AttachAnswer(disk);
|
||||
|
||||
if (isAttach) {
|
||||
vmMo.attachDisk(new String[] { datastoreVolumePath }, morDs);
|
||||
} else {
|
||||
}
|
||||
else {
|
||||
vmMo.removeAllSnapshots();
|
||||
vmMo.detachDisk(datastoreVolumePath, false);
|
||||
|
||||
if (isManaged) {
|
||||
this.hostService.handleDatastoreAndVmdkDetach(iScsiName, storageHost, storagePort);
|
||||
} else {
|
||||
VmwareStorageLayoutHelper.syncVolumeToRootFolder(dsMo.getOwnerDatacenter().first(), dsMo, volumeTO.getPath());
|
||||
VmwareStorageLayoutHelper.syncVolumeToRootFolder(dsMo.getOwnerDatacenter().first(), dsMo, volumeTO.getPath());
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1274,7 +1284,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
|
||||
return morDatastore;
|
||||
}
|
||||
|
||||
|
||||
private Answer attachIso(DiskTO disk, boolean isAttach, String vmName) {
|
||||
try {
|
||||
VmwareHypervisorHost hyperHost = hostService.getHyperHost(hostService.getServiceContext(null), null);
|
||||
|
|
@ -1387,7 +1397,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
String volumeDatastorePath = dsMo.getDatastorePath(volumeUuid + ".vmdk");
|
||||
String dummyVmName = this.hostService.getWorkerName(context, cmd, 0);
|
||||
try {
|
||||
s_logger.info("Create worker VM " + dummyVmName);
|
||||
s_logger.info("Create worker VM " + dummyVmName);
|
||||
vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, dummyVmName);
|
||||
if (vmMo == null) {
|
||||
throw new Exception("Unable to create a dummy VM for volume creation");
|
||||
|
|
@ -1408,8 +1418,8 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
} finally {
|
||||
s_logger.info("Destroy dummy VM after volume creation");
|
||||
if(vmMo != null) {
|
||||
vmMo.detachAllDisks();
|
||||
vmMo.destroy();
|
||||
vmMo.detachAllDisks();
|
||||
vmMo.destroy();
|
||||
}
|
||||
}
|
||||
} catch (Throwable e) {
|
||||
|
|
@ -1460,7 +1470,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
ClusterMO clusterMo = new ClusterMO(context, morCluster);
|
||||
|
||||
if (vol.getVolumeType() == Volume.Type.ROOT) {
|
||||
|
||||
|
||||
String vmName = vol.getVmName();
|
||||
if (vmName != null) {
|
||||
VirtualMachineMO vmMo = clusterMo.findVmOnHyperHost(vmName);
|
||||
|
|
@ -1471,12 +1481,12 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
|
||||
// Remove all snapshots to consolidate disks for removal
|
||||
vmMo.removeAllSnapshots();
|
||||
|
||||
|
||||
VirtualMachineDiskInfo diskInfo = null;
|
||||
if(vol.getChainInfo() != null)
|
||||
diskInfo = _gson.fromJson(vol.getChainInfo(), VirtualMachineDiskInfo.class);
|
||||
|
||||
|
||||
diskInfo = _gson.fromJson(vol.getChainInfo(), VirtualMachineDiskInfo.class);
|
||||
|
||||
|
||||
HostMO hostMo = vmMo.getRunningHost();
|
||||
List<NetworkDetails> networks = vmMo.getNetworksWithDetails();
|
||||
|
||||
|
|
@ -1484,7 +1494,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
if (this.resource.getVmState(vmMo) != State.Stopped) {
|
||||
vmMo.safePowerOff(_shutdown_waitMs);
|
||||
}
|
||||
|
||||
|
||||
List<String> detachedDisks = vmMo.detachAllDisksExcept(vol.getPath(), diskInfo != null ? diskInfo.getDiskDeviceBusName() : null);
|
||||
VmwareStorageLayoutHelper.moveVolumeToRootFolder(new DatacenterMO(context, morDc), detachedDisks);
|
||||
|
||||
|
|
@ -1501,13 +1511,13 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
}
|
||||
}
|
||||
|
||||
/*
|
||||
/*
|
||||
if (s_logger.isInfoEnabled()) {
|
||||
s_logger.info("Destroy volume by original name: " + vol.getPath() + ".vmdk");
|
||||
}
|
||||
|
||||
VmwareStorageLayoutHelper.deleteVolumeVmdkFiles(dsMo, vol.getPath(), new DatacenterMO(context, morDc));
|
||||
*/
|
||||
*/
|
||||
return new Answer(cmd, true, "Success");
|
||||
}
|
||||
|
||||
|
|
@ -1527,8 +1537,8 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
}
|
||||
}
|
||||
|
||||
VmwareStorageLayoutHelper.deleteVolumeVmdkFiles(dsMo, vol.getPath(), new DatacenterMO(context, morDc));
|
||||
|
||||
VmwareStorageLayoutHelper.deleteVolumeVmdkFiles(dsMo, vol.getPath(), new DatacenterMO(context, morDc));
|
||||
|
||||
return new Answer(cmd, true, "Success");
|
||||
} catch (Throwable e) {
|
||||
if (e instanceof RemoteException) {
|
||||
|
|
@@ -1672,10 +1682,20 @@ public class VmwareStorageProcessor implements StorageProcessor {
return new Answer(cmd, false, "unsupported command");
}
}

@Override
public Answer introduceObject(IntroduceObjectCmd cmd) {
return new Answer(cmd, false, "not implememented yet");
}

@Override
public Answer forgetObject(ForgetObjectCmd cmd) {
return new Answer(cmd, false, "not implememented yet");
}

private static String deriveTemplateUuidOnHost(VmwareHypervisorHost hyperHost, String storeIdentifier, String templateName) {
String templateUuid = UUID.nameUUIDFromBytes((templateName + "@" + storeIdentifier + "-" + hyperHost.getMor().getValue()).getBytes()).toString();
templateUuid = templateUuid.replaceAll("-", "");
return templateUuid;
String templateUuid = UUID.nameUUIDFromBytes((templateName + "@" + storeIdentifier + "-" + hyperHost.getMor().getValue()).getBytes()).toString();
templateUuid = templateUuid.replaceAll("-", "");
return templateUuid;
}
}
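deriveTemplateUuidOnHost (re-indented above) builds a deterministic, per-host template UUID: a name-based UUID over "<templateName>@<storeIdentifier>-<hostMorValue>", with the dashes stripped. A small self-contained sketch of the same derivation; the input values are placeholders, not taken from this change.

    import java.util.UUID;

    public class TemplateUuidSketch {
        // Same recipe as deriveTemplateUuidOnHost, isolated for illustration.
        static String deriveTemplateUuid(String templateName, String storeIdentifier, String hostMorValue) {
            String templateUuid = UUID.nameUUIDFromBytes(
                    (templateName + "@" + storeIdentifier + "-" + hostMorValue).getBytes()).toString();
            return templateUuid.replaceAll("-", "");
        }

        public static void main(String[] args) {
            // Placeholder inputs; the real caller passes the template name, the image
            // store identifier and hyperHost.getMor().getValue().
            System.out.println(deriveTemplateUuid("routing-template", "store-1234", "host-42"));
        }
    }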
@ -5348,7 +5348,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
|||
if (pool.getType() == StoragePoolType.NetworkFilesystem) {
|
||||
getNfsSR(conn, pool);
|
||||
} else if (pool.getType() == StoragePoolType.IscsiLUN) {
|
||||
getIscsiSR(conn, pool.getUuid(), pool.getHost(), pool.getPath(), null, null, new Boolean[1]);
|
||||
getIscsiSR(conn, pool.getUuid(), pool.getHost(), pool.getPath(), null, null);
|
||||
} else if (pool.getType() == StoragePoolType.PreSetup) {
|
||||
} else {
|
||||
return new Answer(cmd, false, "The pool type: " + pool.getType().name() + " is not supported.");
|
||||
|
|
@@ -6166,17 +6166,27 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
    }

    protected VDI getVDIbyUuid(Connection conn, String uuid) {
        return getVDIbyUuid(conn, uuid, true);
    }

    protected VDI getVDIbyUuid(Connection conn, String uuid, boolean throwExceptionIfNotFound) {
        try {
            return VDI.getByUuid(conn, uuid);
        } catch (Exception e) {
            String msg = "Catch Exception " + e.getClass().getName() + " :VDI getByUuid for uuid: " + uuid + " failed due to " + e.toString();
            s_logger.debug(msg);
            throw new CloudRuntimeException(msg, e);
            if (throwExceptionIfNotFound) {
                String msg = "Catch Exception " + e.getClass().getName() + " :VDI getByUuid for uuid: " + uuid + " failed due to " + e.toString();
                s_logger.debug(msg);
                throw new CloudRuntimeException(msg, e);
            }

            return null;
        }
    }
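The new overload turns a hard failure into an optional lookup: passing `false` returns `null` for an unknown VDI instead of throwing a `CloudRuntimeException`, which is what the managed-attach path further down relies on. A small caller-side fragment, assumed to sit inside `CitrixResourceBase` where `getVDIbyUuid` and `conn` are in scope; the `volumeUuid` variable is illustrative only.

// Strict lookup: any XenAPI failure is rethrown as a CloudRuntimeException.
VDI requiredVdi = getVDIbyUuid(conn, volumeUuid);

// Lenient lookup: an unknown VDI yields null, letting the caller fall back
// to creating the volume instead of failing the command.
VDI existingVdi = getVDIbyUuid(conn, volumeUuid, false);
if (existingVdi == null) {
    // create the VDI on the target SR rather than aborting the attach
}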
    protected SR getIscsiSR(Connection conn, String srNameLabel, String target, String path,
            String chapInitiatorUsername, String chapInitiatorPassword, Boolean[] created) {
            String chapInitiatorUsername, String chapInitiatorPassword) {
        synchronized (srNameLabel.intern()) {
            Map<String, String> deviceConfig = new HashMap<String, String>();
            try {
@@ -6280,8 +6290,6 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
                {
                    sr = SR.create(conn, host, deviceConfig, new Long(0), srNameLabel, srNameLabel, type, "user", true,
                            smConfig);

                    created[0] = true; // note that the SR was created (as opposed to introduced)
                } else {
                    sr = SR.introduce(conn, pooluuid, srNameLabel, srNameLabel,
                            type, "user", true, smConfig);
@@ -6459,54 +6467,41 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
        }
    }

    protected VDI handleSrAndVdiAttach(String iqn, String storageHostName,
            String chapInitiatorName, String chapInitiatorPassword) throws Types.XenAPIException, XmlRpcException {
    protected VDI createVdi(SR sr, String vdiNameLabel, Long volumeSize) throws Types.XenAPIException, XmlRpcException {
        VDI vdi = null;

        Connection conn = getConnection();

        Boolean[] created = { false };
        VDI.Record vdir = new VDI.Record();

        SR sr = getIscsiSR(conn, iqn,
                storageHostName, iqn,
                chapInitiatorName, chapInitiatorPassword, created);
        vdir.nameLabel = vdiNameLabel;
        vdir.SR = sr;
        vdir.type = Types.VdiType.USER;

        // if created[0] is true, this means the SR was actually created...as opposed to introduced
        if (created[0]) {
            VDI.Record vdir = new VDI.Record();

            vdir.nameLabel = iqn;
            vdir.SR = sr;
            vdir.type = Types.VdiType.USER;

            long totalSpace = sr.getPhysicalSize(conn);
            long unavailableSpace = sr.getPhysicalUtilisation(conn);

            vdir.virtualSize = totalSpace - unavailableSpace;

            if (vdir.virtualSize < 0) {
                throw new CloudRuntimeException("VDI virtual size cannot be less than 0.");
            }

            long maxNumberOfTries = (totalSpace / unavailableSpace >= 1) ? (totalSpace / unavailableSpace) : 1;
            long tryNumber = 0;

            while (tryNumber <= maxNumberOfTries) {
                try {
                    vdi = VDI.create(conn, vdir);

                    break;
                }
                catch (Exception ex) {
                    tryNumber++;

                    vdir.virtualSize -= unavailableSpace;
                }
            }
        long totalSrSpace = sr.getPhysicalSize(conn);
        long unavailableSrSpace = sr.getPhysicalUtilisation(conn);
        long availableSrSpace = totalSrSpace - unavailableSrSpace;

        if (availableSrSpace < volumeSize) {
            throw new CloudRuntimeException("Available space for SR cannot be less than " + volumeSize + ".");
        }
        else {
            vdi = sr.getVDIs(conn).iterator().next();

        vdir.virtualSize = volumeSize;

        long maxNumberOfTries = (totalSrSpace / unavailableSrSpace >= 1) ? (totalSrSpace / unavailableSrSpace) : 1;
        long tryNumber = 0;

        while (tryNumber <= maxNumberOfTries) {
            try {
                vdi = VDI.create(conn, vdir);

                break;
            }
            catch (Exception ex) {
                tryNumber++;

                vdir.virtualSize -= unavailableSrSpace;
            }
        }

        return vdi;
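Condensed, the new `createVdi` shown above does three things: check that the SR has at least `volumeSize` bytes free, build a `VDI.Record` describing the user disk, and retry `VDI.create` a bounded number of times while shrinking the requested size on each failure. The sketch below restates that flow with explicit imports; the class name is illustrative, a plain `RuntimeException` stands in for `CloudRuntimeException`, and a divide-by-zero guard (`Math.max(used, 1)`) is added that the original loop does not have.

import com.xensource.xenapi.Connection;
import com.xensource.xenapi.SR;
import com.xensource.xenapi.Types;
import com.xensource.xenapi.VDI;

class CreateVdiSketch {
    static VDI createVdi(Connection conn, SR sr, String vdiNameLabel, long volumeSize) throws Exception {
        long total = sr.getPhysicalSize(conn);
        long used = sr.getPhysicalUtilisation(conn);

        // Fail fast if the SR cannot hold the requested volume at all.
        if (total - used < volumeSize) {
            throw new RuntimeException("Available space for SR cannot be less than " + volumeSize + ".");
        }

        VDI.Record vdir = new VDI.Record();
        vdir.nameLabel = vdiNameLabel;
        vdir.SR = sr;
        vdir.type = Types.VdiType.USER;
        vdir.virtualSize = volumeSize;

        // Bounded retry: each failed VDI.create shrinks the requested size by the
        // currently utilised space, mirroring the loop in the hunk above.
        long maxNumberOfTries = Math.max(total / Math.max(used, 1), 1);
        for (long tryNumber = 0; tryNumber <= maxNumberOfTries; tryNumber++) {
            try {
                return VDI.create(conn, vdir);
            } catch (Exception ex) {
                vdir.virtualSize -= used;
            }
        }
        return null;
    }
}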
@@ -6534,12 +6529,17 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
        }

        try {
            // Look up the VDI
            VDI vdi = null;

            if (cmd.getAttach() && cmd.isManaged()) {
                vdi = handleSrAndVdiAttach(cmd.get_iScsiName(), cmd.getStorageHost(),
                        cmd.getChapInitiatorUsername(), cmd.getChapInitiatorPassword());
                SR sr = getIscsiSR(conn, cmd.get_iScsiName(), cmd.getStorageHost(), cmd.get_iScsiName(),
                        cmd.getChapInitiatorUsername(), cmd.getChapInitiatorPassword());

                vdi = getVDIbyUuid(conn, cmd.getVolumePath(), false);

                if (vdi == null) {
                    vdi = createVdi(sr, cmd.get_iScsiName(), cmd.getVolumeSize());
                }
            }
            else {
                vdi = getVDIbyUuid(conn, cmd.getVolumePath());
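Taken together, the managed branch above replaces the old one-shot `handleSrAndVdiAttach` with an explicit find-or-create sequence: resolve (or re-introduce) the per-LUN iSCSI SR, look the VDI up leniently by its stored path, and only create it when nothing is found. A comment-level sketch of that decision, with the command fields assumed to be the same ones used in the hunk:

// 1. Ensure the per-LUN SR exists on this host (created or re-introduced as needed).
SR sr = getIscsiSR(conn, cmd.get_iScsiName(), cmd.getStorageHost(), cmd.get_iScsiName(),
        cmd.getChapInitiatorUsername(), cmd.getChapInitiatorPassword());

// 2. Try to find an existing VDI without failing the command if it is absent.
VDI vdi = getVDIbyUuid(conn, cmd.getVolumePath(), false);

// 3. First attach of this volume: create the VDI on the managed SR.
if (vdi == null) {
    vdi = createVdi(sr, cmd.get_iScsiName(), cmd.getVolumeSize());
}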
@@ -18,6 +18,36 @@
 */
package com.cloud.hypervisor.xen.resource;

import java.io.File;
import java.net.URI;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;

import org.apache.cloudstack.storage.command.AttachAnswer;
import org.apache.cloudstack.storage.command.AttachCommand;
import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreAnswer;
import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreCmd;
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
import org.apache.cloudstack.storage.command.CopyCommand;
import org.apache.cloudstack.storage.command.CreateObjectAnswer;
import org.apache.cloudstack.storage.command.CreateObjectCommand;
import org.apache.cloudstack.storage.command.DeleteCommand;
import org.apache.cloudstack.storage.command.DettachAnswer;
import org.apache.cloudstack.storage.command.DettachCommand;
import org.apache.cloudstack.storage.command.ForgetObjectCmd;
import org.apache.cloudstack.storage.command.IntroduceObjectAnswer;
import org.apache.cloudstack.storage.command.IntroduceObjectCmd;
import org.apache.cloudstack.storage.datastore.protocol.DataStoreProtocol;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
import org.apache.cloudstack.storage.to.TemplateObjectTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.log4j.Logger;
import org.apache.xmlrpc.XmlRpcException;

import com.cloud.agent.api.Answer;
import com.cloud.agent.api.CreateStoragePoolCommand;
import com.cloud.agent.api.to.DataObjectType;
@@ -51,33 +81,6 @@ import com.xensource.xenapi.VBD;
import com.xensource.xenapi.VDI;
import com.xensource.xenapi.VM;
import com.xensource.xenapi.VMGuestMetrics;
import org.apache.cloudstack.storage.command.AttachAnswer;
import org.apache.cloudstack.storage.command.AttachCommand;
import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreAnswer;
import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreCmd;
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
import org.apache.cloudstack.storage.command.CopyCommand;
import org.apache.cloudstack.storage.command.CreateObjectAnswer;
import org.apache.cloudstack.storage.command.CreateObjectCommand;
import org.apache.cloudstack.storage.command.DeleteCommand;
import org.apache.cloudstack.storage.command.DettachAnswer;
import org.apache.cloudstack.storage.command.DettachCommand;
import org.apache.cloudstack.storage.datastore.protocol.DataStoreProtocol;
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
import org.apache.cloudstack.storage.to.TemplateObjectTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.log4j.Logger;
import org.apache.xmlrpc.XmlRpcException;

import java.io.File;
import java.net.URI;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;

import static com.cloud.utils.ReflectUtil.flattenProperties;
import static com.google.common.collect.Lists.newArrayList;
@@ -164,12 +167,20 @@ public class XenServerStorageProcessor implements StorageProcessor {
        try {
            Connection conn = this.hypervisorResource.getConnection();
            // Look up the VDI
            VDI vdi = null;

            if (cmd.isManaged()) {
                vdi = this.hypervisorResource.handleSrAndVdiAttach(cmd.get_iScsiName(), cmd.getStorageHost(),
                        cmd.getChapInitiatorUsername(), cmd.getChapInitiatorPassword());
                SR sr = this.hypervisorResource.getIscsiSR(conn, cmd.get_iScsiName(), cmd.getStorageHost(), cmd.get_iScsiName(),
                        cmd.getChapInitiatorUsername(), cmd.getChapInitiatorPassword());

                vdi = this.hypervisorResource.getVDIbyUuid(conn, data.getPath(), false);

                if (vdi == null) {
                    VolumeObjectTO volume = (VolumeObjectTO)data;

                    vdi = this.hypervisorResource.createVdi(sr, cmd.get_iScsiName(), volume.getSize());
                }
            }
            else {
                vdi = this.hypervisorResource.mount(conn, null, null, data.getPath());
@@ -841,8 +852,7 @@ public class XenServerStorageProcessor implements StorageProcessor {
            URI uri = new URI(storeUrl);
            String tmplpath = uri.getHost() + ":" + uri.getPath() + "/" + srcData.getPath();
            PrimaryDataStoreTO destStore = (PrimaryDataStoreTO)destData.getDataStore();
            String poolName = destStore.getUuid();
            String poolName = destData.getDataStore().getUuid();
            Connection conn = hypervisorResource.getConnection();

            SR poolsr = null;
@@ -892,8 +902,7 @@ public class XenServerStorageProcessor implements StorageProcessor {
        try {
            Connection conn = hypervisorResource.getConnection();
            PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)data.getDataStore();
            SR poolSr = hypervisorResource.getStorageRepository(conn, primaryStore.getUuid());
            SR poolSr = hypervisorResource.getStorageRepository(conn, data.getDataStore().getUuid());
            VDI.Record vdir = new VDI.Record();
            vdir.nameLabel = volume.getName();
            vdir.SR = poolSr;
@@ -921,7 +930,6 @@ public class XenServerStorageProcessor implements StorageProcessor {
        Connection conn = hypervisorResource.getConnection();
        DataTO srcData = cmd.getSrcTO();
        DataTO destData = cmd.getDestTO();
        PrimaryDataStoreTO pool = (PrimaryDataStoreTO)destData.getDataStore();
        VolumeObjectTO volume = (VolumeObjectTO)destData;
        VDI vdi = null;
        try {
@@ -943,7 +951,7 @@ public class XenServerStorageProcessor implements StorageProcessor {
            return new CopyCmdAnswer(newVol);
        } catch (Exception e) {
            s_logger.warn("Unable to create volume; Pool=" + pool + "; Disk: ", e);
            s_logger.warn("Unable to create volume; Pool=" + destData + "; Disk: ", e);
            return new CopyCmdAnswer(e.toString());
        }
    }
@@ -956,13 +964,12 @@ public class XenServerStorageProcessor implements StorageProcessor {
        int wait = cmd.getWait();
        VolumeObjectTO srcVolume = (VolumeObjectTO)srcData;
        VolumeObjectTO destVolume = (VolumeObjectTO)destData;
        PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)destVolume.getDataStore();
        DataStoreTO srcStore = srcVolume.getDataStore();

        if (srcStore instanceof NfsTO) {
            NfsTO nfsStore = (NfsTO)srcStore;
            try {
                SR primaryStoragePool = hypervisorResource.getStorageRepository(conn, primaryStore.getUuid());
                SR primaryStoragePool = hypervisorResource.getStorageRepository(conn, destVolume.getDataStore().getUuid());
                String srUuid = primaryStoragePool.getUuid(conn);
                URI uri = new URI(nfsStore.getUrl());
                String volumePath = uri.getHost() + ":" + uri.getPath() + File.separator + srcVolume.getPath();
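The copy path above derives the mountable source location by splitting the NFS store URL into host and export path and then appending the volume's relative path. A minimal standalone sketch of that parsing using only `java.net.URI`; the sample URL and volume path are illustrative, not taken from the diff:

import java.io.File;
import java.net.URI;
import java.net.URISyntaxException;

public class NfsPathSketch {
    public static void main(String[] args) throws URISyntaxException {
        String storeUrl = "nfs://10.1.1.1/export/secondary";   // illustrative NfsTO.getUrl() value
        String volumeRelativePath = "volumes/5/42/disk.vhd";   // illustrative volume path

        URI uri = new URI(storeUrl);
        // Yields the host:path form used by the code above,
        // e.g. 10.1.1.1:/export/secondary/volumes/5/42/disk.vhd
        String volumePath = uri.getHost() + ":" + uri.getPath() + File.separator + volumeRelativePath;
        System.out.println(volumePath);
    }
}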
@@ -1179,8 +1186,7 @@ public class XenServerStorageProcessor implements StorageProcessor {
        DataTO cacheData = cmd.getCacheTO();
        DataTO destData = cmd.getDestTO();
        int wait = cmd.getWait();
        PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)srcData.getDataStore();
        String primaryStorageNameLabel = primaryStore.getUuid();
        String primaryStorageNameLabel = srcData.getDataStore().getUuid();
        String secondaryStorageUrl = null;
        NfsTO cacheStore = null;
        String destPath = null;
@@ -1415,7 +1421,6 @@ public class XenServerStorageProcessor implements StorageProcessor {
        DataTO srcData = cmd.getSrcTO();
        SnapshotObjectTO snapshot = (SnapshotObjectTO)srcData;
        DataTO destData = cmd.getDestTO();
        PrimaryDataStoreTO pool = (PrimaryDataStoreTO)destData.getDataStore();
        DataStoreTO imageStore = srcData.getDataStore();

        if (!(imageStore instanceof NfsTO)) {
@@ -1423,7 +1428,7 @@ public class XenServerStorageProcessor implements StorageProcessor {
        }

        NfsTO nfsImageStore = (NfsTO)imageStore;
        String primaryStorageNameLabel = pool.getUuid();
        String primaryStorageNameLabel = destData.getDataStore().getUuid();
        String secondaryStorageUrl = nfsImageStore.getUrl();
        int wait = cmd.getWait();
        boolean result = false;
@@ -1503,4 +1508,32 @@ public class XenServerStorageProcessor implements StorageProcessor {
        }
        return new Answer(cmd, false, "unsupported storage type");
    }

    @Override
    public Answer introduceObject(IntroduceObjectCmd cmd) {
        try {
            Connection conn = hypervisorResource.getConnection();
            DataStoreTO store = cmd.getDataTO().getDataStore();
            SR poolSr = hypervisorResource.getStorageRepository(conn, store.getUuid());
            poolSr.scan(conn);
            return new IntroduceObjectAnswer(cmd.getDataTO());
        } catch (Exception e) {
            s_logger.debug("Failed to introduce object", e);
            return new Answer(cmd, false, e.toString());
        }
    }

    @Override
    public Answer forgetObject(ForgetObjectCmd cmd) {
        try {
            Connection conn = hypervisorResource.getConnection();
            DataTO data = cmd.getDataTO();
            VDI vdi = VDI.getByUuid(conn, data.getPath());
            vdi.forget(conn);
            return new IntroduceObjectAnswer(cmd.getDataTO());
        } catch (Exception e) {
            s_logger.debug("Failed to forget object", e);
            return new Answer(cmd, false, e.toString());
        }
    }
}
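The two new handlers lean on XenAPI's split between the VDI record and the data behind it: `forgetObject` drops only XAPI's record of the VDI via `VDI.forget`, leaving the VHD on the SR untouched, while `introduceObject` triggers an `SR.scan` so the storage repository re-reads its contents and any forgotten VDIs become visible again. A short fragment illustrating that round trip, assuming a `conn`, an `sr`, and a `volumeUuid` are in scope as in the methods above:

// Drop the VDI record from XAPI's database; the VHD on the SR is not deleted.
VDI vdi = VDI.getByUuid(conn, volumeUuid);
vdi.forget(conn);

// Rescanning the SR makes XAPI re-discover the on-disk VHD, so the VDI
// described by that file becomes visible again without copying any data.
sr.scan(conn);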
@@ -30,7 +30,7 @@ import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Op;

@Component
@Local(value=NetScalerPodDao.class) @DB(txn=false)
@Local(value=NetScalerPodDao.class) @DB
public class NetScalerPodDaoImpl extends GenericDaoBase<NetScalerPodVO, Long> implements NetScalerPodDao {

    final SearchBuilder<NetScalerPodVO> podIdSearch;
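Aside from the `@DB` annotation losing its `txn` attribute, the DAO keeps the usual GenericDaoBase pattern: a `SearchBuilder` prepared once and turned into `SearchCriteria` per query. A hedged sketch of how such a `podIdSearch` is typically wired up in CloudStack DAOs; the `"podId"` condition name and the `getPodId()` accessor on `NetScalerPodVO` are assumptions for illustration, not taken from this diff.

// In the DAO constructor: describe the query shape once.
podIdSearch = createSearchBuilder();
podIdSearch.and("podId", podIdSearch.entity().getPodId(), Op.EQ);
podIdSearch.done();

// In a finder method: bind the parameter and run the query.
SearchCriteria<NetScalerPodVO> sc = podIdSearch.create();
sc.setParameters("podId", podId);
List<NetScalerPodVO> pods = listBy(sc);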