diff --git a/agent/src/com/cloud/agent/AgentShell.java b/agent/src/com/cloud/agent/AgentShell.java
index bf1e8180e44..42adac07165 100644
--- a/agent/src/com/cloud/agent/AgentShell.java
+++ b/agent/src/com/cloud/agent/AgentShell.java
@@ -19,14 +19,11 @@ package com.cloud.agent;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
 import java.io.IOException;
-import java.io.InputStream;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
 import java.util.ArrayList;
 import java.util.Collections;
-import java.util.Date;
 import java.util.Enumeration;
 import java.util.HashMap;
 import java.util.List;
@@ -39,9 +36,7 @@ import javax.naming.ConfigurationException;
 import org.apache.commons.daemon.Daemon;
 import org.apache.commons.daemon.DaemonContext;
 import org.apache.commons.daemon.DaemonInitException;
-import org.apache.commons.httpclient.HttpClient;
 import org.apache.commons.httpclient.MultiThreadedHttpConnectionManager;
-import org.apache.commons.httpclient.methods.GetMethod;

 import org.apache.log4j.Logger;
 import org.apache.log4j.xml.DOMConfigurator;
@@ -56,7 +51,6 @@ import com.cloud.utils.PropertiesUtil;
 import com.cloud.utils.backoff.BackoffAlgorithm;
 import com.cloud.utils.backoff.impl.ConstantTimeBackoff;
 import com.cloud.utils.exception.CloudRuntimeException;
-import com.cloud.utils.script.Script;

 public class AgentShell implements IAgentShell, Daemon {
     private static final Logger s_logger = Logger.getLogger(AgentShell.class
diff --git a/api/src/com/cloud/agent/api/to/DataStoreTO.java b/api/src/com/cloud/agent/api/to/DataStoreTO.java
index 9014f8e2b81..b79ba7d64be 100644
--- a/api/src/com/cloud/agent/api/to/DataStoreTO.java
+++ b/api/src/com/cloud/agent/api/to/DataStoreTO.java
@@ -20,7 +20,7 @@ package com.cloud.agent.api.to;

 import com.cloud.storage.DataStoreRole;

-
 public interface DataStoreTO {
     public DataStoreRole getRole();
+    public String getUuid();
 }
diff --git a/api/src/com/cloud/agent/api/to/NfsTO.java b/api/src/com/cloud/agent/api/to/NfsTO.java
index 415c95ce3f5..54683c7f410 100644
--- a/api/src/com/cloud/agent/api/to/NfsTO.java
+++ b/api/src/com/cloud/agent/api/to/NfsTO.java
@@ -22,6 +22,7 @@ public class NfsTO implements DataStoreTO {

     private String _url;
     private DataStoreRole _role;
+    private String uuid;

     public NfsTO() {

@@ -55,6 +56,12 @@ public class NfsTO implements DataStoreTO {
         this._role = _role;
     }

+    @Override
+    public String getUuid() {
+        return uuid;
+    }
-
+    public void setUuid(String uuid) {
+        this.uuid = uuid;
+    }

 }
diff --git a/api/src/com/cloud/agent/api/to/S3TO.java b/api/src/com/cloud/agent/api/to/S3TO.java
index b1b692a8bad..ab08a696c96 100644
--- a/api/src/com/cloud/agent/api/to/S3TO.java
+++ b/api/src/com/cloud/agent/api/to/S3TO.java
@@ -39,6 +39,7 @@ public final class S3TO implements S3Utils.ClientOptions, DataStoreTO {
     private Integer socketTimeout;
     private Date created;
     private boolean enableRRS;
+    private boolean multipartEnabled;

     public S3TO() {

@@ -50,7 +51,7 @@ public final class S3TO implements S3Utils.ClientOptions, DataStoreTO {
             final String secretKey, final String endPoint, final String bucketName,
             final Boolean httpsFlag, final Integer connectionTimeout,
             final Integer maxErrorRetry,
-            final Integer socketTimeout, final Date created, final boolean enableRRS) {
+            final Integer socketTimeout, final Date created, final boolean enableRRS, final boolean multipart) {

         super();

@@ -66,6 +67,7 @@ public final class S3TO implements S3Utils.ClientOptions, DataStoreTO {
         this.socketTimeout = socketTimeout;
         this.created = created;
         this.enableRRS = enableRRS;
+        this.multipartEnabled = multipart;

     }

@@ -268,7 +270,6 @@ public final class S3TO implements S3Utils.ClientOptions, DataStoreTO {
     }

-
     public boolean getEnableRRS() {
         return enableRRS;
     }

@@ -277,5 +278,14 @@ public final class S3TO implements S3Utils.ClientOptions, DataStoreTO {
         this.enableRRS = enableRRS;
     }

+    public boolean isMultipartEnabled() {
+        return multipartEnabled;
+    }
+
+    public void setMultipartEnabled(boolean multipartEnabled) {
+        this.multipartEnabled = multipartEnabled;
+    }
+
+
 }
diff --git a/api/src/com/cloud/agent/api/to/SwiftTO.java b/api/src/com/cloud/agent/api/to/SwiftTO.java
index 7349d7779ac..3ad131ac4d8 100644
--- a/api/src/com/cloud/agent/api/to/SwiftTO.java
+++ b/api/src/com/cloud/agent/api/to/SwiftTO.java
@@ -29,8 +29,7 @@ public class SwiftTO implements DataStoreTO, SwiftUtil.SwiftClientCfg {

     public SwiftTO() {
     }

-    public SwiftTO(Long id, String url, String account, String userName, String key
-            ) {
+    public SwiftTO(Long id, String url, String account, String userName, String key) {
         this.id = id;
         this.url = url;
         this.account = account;
@@ -46,14 +45,17 @@ public class SwiftTO implements DataStoreTO, SwiftUtil.SwiftClientCfg {
         return url;
     }

+    @Override
     public String getAccount() {
         return account;
     }

+    @Override
     public String getUserName() {
         return userName;
     }

+    @Override
     public String getKey() {
         return key;
     }
@@ -67,4 +69,9 @@ public class SwiftTO implements DataStoreTO, SwiftUtil.SwiftClientCfg {
     public String getEndPoint() {
         return this.url;
     }
+
+    @Override
+    public String getUuid() {
+        return null;
+    }
 }
diff --git a/api/src/com/cloud/event/EventTypes.java b/api/src/com/cloud/event/EventTypes.java
index ec9604e3f90..a762606e9ad 100755
--- a/api/src/com/cloud/event/EventTypes.java
+++ b/api/src/com/cloud/event/EventTypes.java
@@ -199,6 +199,7 @@ public class EventTypes {
     // Snapshots
     public static final String EVENT_SNAPSHOT_CREATE = "SNAPSHOT.CREATE";
     public static final String EVENT_SNAPSHOT_DELETE = "SNAPSHOT.DELETE";
+    public static final String EVENT_SNAPSHOT_REVERT = "SNAPSHOT.REVERT";
     public static final String EVENT_SNAPSHOT_POLICY_CREATE = "SNAPSHOTPOLICY.CREATE";
     public static final String EVENT_SNAPSHOT_POLICY_UPDATE = "SNAPSHOTPOLICY.UPDATE";
     public static final String EVENT_SNAPSHOT_POLICY_DELETE = "SNAPSHOTPOLICY.DELETE";
@@ -387,7 +388,7 @@ public class EventTypes {
     public static final String EVENT_RESOURCE_DETAILS_CREATE = "CREATE_RESOURCE_DETAILS";
     public static final String EVENT_RESOURCE_DETAILS_DELETE = "DELETE_RESOURCE_DETAILS";

-    // vm snapshot events
+    // vm snapshot events
     public static final String EVENT_VM_SNAPSHOT_CREATE = "VMSNAPSHOT.CREATE";
     public static final String EVENT_VM_SNAPSHOT_DELETE = "VMSNAPSHOT.DELETE";
     public static final String EVENT_VM_SNAPSHOT_REVERT = "VMSNAPSHOT.REVERTTO";
@@ -444,7 +445,7 @@ public class EventTypes {
     public static final String EVENT_DEDICATE_RESOURCE_RELEASE = "DEDICATE.RESOURCE.RELEASE";

     public static final String EVENT_CLEANUP_VM_RESERVATION = "VM.RESERVATION.CLEANUP";
-
+
     public static final String EVENT_UCS_ASSOCIATED_PROFILE = "UCS.ASSOCIATEPROFILE";

     static {
diff --git a/api/src/com/cloud/storage/VolumeApiService.java b/api/src/com/cloud/storage/VolumeApiService.java
index e8fb8de9aca..4806ae7c06f 100644
--- a/api/src/com/cloud/storage/VolumeApiService.java
+++ b/api/src/com/cloud/storage/VolumeApiService.java
@@ -84,7 +84,7 @@ public interface VolumeApiService {

     Snapshot allocSnapshot(Long volumeId, Long policyId) throws ResourceAllocationException;

-    Volume updateVolume(long volumeId, String path, String state, Long storageId);
+    Volume updateVolume(long volumeId, String path, String state, Long storageId, Boolean displayVolume);

     /**
      * Extracts the volume to a particular location.
diff --git a/api/src/com/cloud/storage/snapshot/SnapshotApiService.java b/api/src/com/cloud/storage/snapshot/SnapshotApiService.java
index 23e65220ff9..4f135107f07 100644
--- a/api/src/com/cloud/storage/snapshot/SnapshotApiService.java
+++ b/api/src/com/cloud/storage/snapshot/SnapshotApiService.java
@@ -106,4 +106,6 @@ public interface SnapshotApiService {
      * @return
      */
     Long getHostIdForSnapshotOperation(Volume vol);
+
+    boolean revertSnapshot(Long snapshotId);
 }
diff --git a/api/src/org/apache/cloudstack/api/command/admin/offering/UpdateDiskOfferingCmd.java b/api/src/org/apache/cloudstack/api/command/admin/offering/UpdateDiskOfferingCmd.java
index 1e421a13d3f..a7b8dcd5439 100644
--- a/api/src/org/apache/cloudstack/api/command/admin/offering/UpdateDiskOfferingCmd.java
+++ b/api/src/org/apache/cloudstack/api/command/admin/offering/UpdateDiskOfferingCmd.java
@@ -49,6 +49,9 @@ public class UpdateDiskOfferingCmd extends BaseCmd{
     @Parameter(name=ApiConstants.SORT_KEY, type=CommandType.INTEGER, description="sort key of the disk offering, integer")
     private Integer sortKey;

+    @Parameter(name=ApiConstants.DISPLAY_OFFERING, type=CommandType.BOOLEAN, description="an optional field, whether to display the offering to the end user or not.")
+    private Boolean displayOffering;
+
     /////////////////////////////////////////////////////
     /////////////////// Accessors ///////////////////////
     /////////////////////////////////////////////////////
@@ -69,8 +72,11 @@ public class UpdateDiskOfferingCmd extends BaseCmd{
         return sortKey;
     }

+    public Boolean getDisplayOffering() {
+        return displayOffering;
+    }

-    /////////////////////////////////////////////////////
+/////////////////////////////////////////////////////
     /////////////// API Implementation///////////////////
     /////////////////////////////////////////////////////

diff --git a/api/src/org/apache/cloudstack/api/command/admin/storage/ListStoragePoolsCmd.java b/api/src/org/apache/cloudstack/api/command/admin/storage/ListStoragePoolsCmd.java
index 26351bb7755..ddf0391a905 100644
--- a/api/src/org/apache/cloudstack/api/command/admin/storage/ListStoragePoolsCmd.java
+++ b/api/src/org/apache/cloudstack/api/command/admin/storage/ListStoragePoolsCmd.java
@@ -59,7 +59,7 @@ public class ListStoragePoolsCmd extends BaseListCmd {
     @Parameter(name=ApiConstants.ZONE_ID, type=CommandType.UUID, entityType = ZoneResponse.class,
             description="the Zone ID for the storage pool")
     private Long zoneId;
-
+
     @Parameter(name=ApiConstants.ID, type=CommandType.UUID, entityType = StoragePoolResponse.class,
             description="the ID of the storage pool")
     private Long id;
@@ -109,6 +109,7 @@ public class ListStoragePoolsCmd extends BaseListCmd {
         return s_name;
     }

+    @Override
     public ApiCommandJobType getInstanceType() {
         return ApiCommandJobType.StoragePool;
     }
diff --git a/api/src/org/apache/cloudstack/api/command/user/snapshot/RevertSnapshotCmd.java b/api/src/org/apache/cloudstack/api/command/user/snapshot/RevertSnapshotCmd.java
new file mode 100644
index 00000000000..946eebd180f
--- /dev/null
+++ b/api/src/org/apache/cloudstack/api/command/user/snapshot/RevertSnapshotCmd.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.api.command.user.snapshot;
+
+import com.cloud.event.EventTypes;
+import com.cloud.storage.Snapshot;
+import com.cloud.user.Account;
+import org.apache.cloudstack.api.APICommand;
+import org.apache.cloudstack.api.ApiCommandJobType;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.ApiErrorCode;
+import org.apache.cloudstack.api.BaseAsyncCmd;
+import org.apache.cloudstack.api.BaseCmd;
+import org.apache.cloudstack.api.Parameter;
+import org.apache.cloudstack.api.ServerApiException;
+import org.apache.cloudstack.api.response.SnapshotResponse;
+import org.apache.cloudstack.api.response.SuccessResponse;
+import org.apache.cloudstack.context.CallContext;
+
+@APICommand(name = "RevertSnapshot", description = "revert a volume snapshot.", responseObject = SnapshotResponse.class)
+public class RevertSnapshotCmd extends BaseAsyncCmd {
+    private static final String s_name = "revertsnapshotresponse";
+    @Parameter(name= ApiConstants.ID, type= BaseCmd.CommandType.UUID, entityType = SnapshotResponse.class,
+            required=true, description="The ID of the snapshot")
+    private Long id;
+
+    public Long getId() {
+        return id;
+    }
+
+
+    @Override
+    public String getCommandName() {
+        return s_name;
+    }
+
+    @Override
+    public long getEntityOwnerId() {
+        Snapshot snapshot = _entityMgr.findById(Snapshot.class, getId());
+        if (snapshot != null) {
+            return snapshot.getAccountId();
+        }
+
+        return Account.ACCOUNT_ID_SYSTEM; // no account info given, parent this command to SYSTEM so ERROR events are tracked
+    }
+
+    @Override
+    public String getEventType() {
+        return EventTypes.EVENT_SNAPSHOT_REVERT;
+    }
+
+    @Override
+    public String getEventDescription() {
+        return "revert snapshot: " + getId();
+    }
+
+    public ApiCommandJobType getInstanceType() {
+        return ApiCommandJobType.Snapshot;
+    }
+
+    public Long getInstanceId() {
+        return getId();
+    }
+
+    @Override
+    public void execute(){
+        CallContext.current().setEventDetails("Snapshot Id: "+getId());
+        boolean result = _snapshotService.revertSnapshot(getId());
+        if (result) {
+            SuccessResponse response = new SuccessResponse(getCommandName());
+            response.setResponseName(getCommandName());
+            this.setResponseObject(response);
+        } else {
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to revert snapshot");
+        }
+    }
+}
diff --git a/api/src/org/apache/cloudstack/api/command/user/volume/UpdateVolumeCmd.java b/api/src/org/apache/cloudstack/api/command/user/volume/UpdateVolumeCmd.java
index bc17b2e22c0..b247e0f4528 100644
--- a/api/src/org/apache/cloudstack/api/command/user/volume/UpdateVolumeCmd.java
+++ b/api/src/org/apache/cloudstack/api/command/user/volume/UpdateVolumeCmd.java
@@ -35,7 +35,7 @@ import com.cloud.storage.Volume;
 @APICommand(name = "updateVolume", description="Updates the volume.", responseObject=VolumeResponse.class)
 public class UpdateVolumeCmd extends BaseAsyncCmd {
     public static final Logger s_logger = Logger.getLogger(UpdateVolumeCmd.class.getName());
-    private static final String s_name = "addVolumeresponse";
+    private static final String s_name = "updatevolumeresponse";
     /////////////////////////////////////////////////////
     //////////////// API parameters /////////////////////
     /////////////////////////////////////////////////////
@@ -54,6 +54,9 @@ public class UpdateVolumeCmd extends BaseAsyncCmd {
     @Parameter(name=ApiConstants.STATE, type=CommandType.STRING, description="The state of the volume", since="4.3")
     private String state;

+    @Parameter(name=ApiConstants.DISPLAY_VOLUME, type=CommandType.BOOLEAN, description="an optional field, whether to the display the volume to the end user or not.")
+    private Boolean displayVolume;
+
     /////////////////////////////////////////////////////
     /////////////////// Accessors ///////////////////////
     /////////////////////////////////////////////////////
@@ -73,9 +76,12 @@ public class UpdateVolumeCmd extends BaseAsyncCmd {
     public String getState() {
         return state;
     }
-
-    /////////////////////////////////////////////////////

+    public Boolean getDisplayVolume() {
+        return displayVolume;
+    }
+
+/////////////////////////////////////////////////////
     /////////////// API Implementation///////////////////
     /////////////////////////////////////////////////////

@@ -126,7 +132,7 @@ public class UpdateVolumeCmd extends BaseAsyncCmd {
     @Override
     public void execute(){
         CallContext.current().setEventDetails("Volume Id: "+getId());
-        Volume result = _volumeService.updateVolume(getId(), getPath(), getState(), getStorageId());
+        Volume result = _volumeService.updateVolume(getId(), getPath(), getState(), getStorageId(), getDisplayVolume());
         if (result != null) {
             VolumeResponse response = _responseGenerator.createVolumeResponse(result);
             response.setResponseName(getCommandName());
diff --git a/api/src/org/apache/cloudstack/api/response/VolumeResponse.java b/api/src/org/apache/cloudstack/api/response/VolumeResponse.java
index 338fcaae5a4..d2ca37a6f8d 100644
--- a/api/src/org/apache/cloudstack/api/response/VolumeResponse.java
+++ b/api/src/org/apache/cloudstack/api/response/VolumeResponse.java
@@ -178,12 +178,26 @@ public class VolumeResponse extends BaseResponse implements ControlledViewEntity
     @Param(description="the status of the volume")
     private String status;

-    @SerializedName(ApiConstants.TAGS) @Param(description="the list of resource tags associated with volume", responseObject = ResourceTagResponse.class)
+    @SerializedName(ApiConstants.TAGS)
+    @Param(description="the list of resource tags associated with volume", responseObject = ResourceTagResponse.class)
     private Set tags;

-    @SerializedName(ApiConstants.DISPLAY_VOLUME) @Param(description="an optional field whether to the display the volume to the end user or not.")
+    @SerializedName(ApiConstants.DISPLAY_VOLUME)
+    @Param(description="an optional field whether to the display the volume to the end user or not.")
     private Boolean displayVm;

+    @SerializedName(ApiConstants.PATH)
+    @Param(description="The path of the volume")
+    private String path;
+
+    public String getPath() {
+        return path;
+    }
+
+    public void setPath(String path) {
+        this.path = path;
+    }
+
     public VolumeResponse(){
         tags = new LinkedHashSet();
     }
diff --git a/core/src/com/cloud/agent/api/AttachVolumeCommand.java b/core/src/com/cloud/agent/api/AttachVolumeCommand.java
index 49b2a706b4b..e9276198dbf 100644
--- a/core/src/com/cloud/agent/api/AttachVolumeCommand.java
+++ b/core/src/com/cloud/agent/api/AttachVolumeCommand.java
@@ -25,6 +25,7 @@ public class AttachVolumeCommand extends Command {
     private StoragePoolType pooltype;
     private String volumePath;
     private String volumeName;
+    private Long volumeSize;
     private Long deviceId;
     private String chainInfo;
     private String poolUuid;
@@ -45,13 +46,14 @@ public class AttachVolumeCommand extends Command {

     public AttachVolumeCommand(boolean attach, boolean managed, String vmName,
             StoragePoolType pooltype, String volumePath, String volumeName,
-            Long deviceId, String chainInfo) {
+            Long volumeSize, Long deviceId, String chainInfo) {
         this.attach = attach;
         this._managed = managed;
         this.vmName = vmName;
         this.pooltype = pooltype;
         this.volumePath = volumePath;
         this.volumeName = volumeName;
+        this.volumeSize = volumeSize;
         this.deviceId = deviceId;
         this.chainInfo = chainInfo;
     }
@@ -85,6 +87,10 @@ public class AttachVolumeCommand extends Command {
         return volumeName;
     }

+    public Long getVolumeSize() {
+        return volumeSize;
+    }
+
     public Long getDeviceId() {
         return deviceId;
     }
diff --git a/core/src/com/cloud/storage/resource/StorageProcessor.java b/core/src/com/cloud/storage/resource/StorageProcessor.java
index 5fa9f8a86e3..29f4a677375 100644
--- a/core/src/com/cloud/storage/resource/StorageProcessor.java
+++ b/core/src/com/cloud/storage/resource/StorageProcessor.java
@@ -23,8 +23,12 @@ import org.apache.cloudstack.storage.command.CopyCommand;
 import org.apache.cloudstack.storage.command.CreateObjectCommand;
 import org.apache.cloudstack.storage.command.DeleteCommand;
 import org.apache.cloudstack.storage.command.DettachCommand;
+import org.apache.cloudstack.storage.command.ForgetObjectCmd;
+import org.apache.cloudstack.storage.command.IntroduceObjectCmd;

 import com.cloud.agent.api.Answer;
+import org.apache.cloudstack.storage.command.ForgetObjectCmd;
+import org.apache.cloudstack.storage.command.IntroduceObjectCmd;

 public interface StorageProcessor {
     public Answer copyTemplateToPrimaryStorage(CopyCommand cmd);
@@ -43,4 +47,6 @@ public interface StorageProcessor {
     public Answer deleteVolume(DeleteCommand cmd);
     public Answer createVolumeFromSnapshot(CopyCommand cmd);
     public Answer deleteSnapshot(DeleteCommand cmd);
+    Answer introduceObject(IntroduceObjectCmd cmd);
+    Answer forgetObject(ForgetObjectCmd cmd);
 }
diff --git a/core/src/com/cloud/storage/resource/StorageSubsystemCommandHandlerBase.java b/core/src/com/cloud/storage/resource/StorageSubsystemCommandHandlerBase.java
index ab9aa2a3ee6..002143f460e 100644
--- a/core/src/com/cloud/storage/resource/StorageSubsystemCommandHandlerBase.java
+++ b/core/src/com/cloud/storage/resource/StorageSubsystemCommandHandlerBase.java
@@ -24,6 +24,7 @@ import org.apache.cloudstack.storage.command.CreateObjectAnswer;
 import org.apache.cloudstack.storage.command.CreateObjectCommand;
 import org.apache.cloudstack.storage.command.DeleteCommand;
 import org.apache.cloudstack.storage.command.DettachCommand;
+import org.apache.cloudstack.storage.command.IntroduceObjectCmd;
 import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
 import org.apache.log4j.Logger;

@@ -55,6 +56,8 @@ public class StorageSubsystemCommandHandlerBase implements StorageSubsystemComma
             return execute((AttachCommand)command);
         } else if (command instanceof DettachCommand) {
             return execute((DettachCommand)command);
+        } else if (command instanceof IntroduceObjectCmd) {
+            return processor.introduceObject((IntroduceObjectCmd)command);
         }
         return new Answer((Command)command, false, "not implemented yet");
     }
@@ -65,7 +68,7 @@ public class StorageSubsystemCommandHandlerBase implements StorageSubsystemComma
         DataStoreTO srcDataStore = srcData.getDataStore();
         DataStoreTO destDataStore = destData.getDataStore();

-        if ((srcData.getObjectType() == DataObjectType.TEMPLATE) && (srcDataStore instanceof NfsTO) && (destData.getDataStore().getRole() == DataStoreRole.Primary)) {
+        if ((srcData.getObjectType() == DataObjectType.TEMPLATE) && (destData.getObjectType() == DataObjectType.TEMPLATE && destData.getDataStore().getRole() == DataStoreRole.Primary)) {
             //copy template to primary storage
             return processor.copyTemplateToPrimaryStorage(cmd);
         } else if (srcData.getObjectType() == DataObjectType.TEMPLATE && srcDataStore.getRole() == DataStoreRole.Primary && destDataStore.getRole() == DataStoreRole.Primary) {
@@ -80,18 +83,19 @@ public class StorageSubsystemCommandHandlerBase implements StorageSubsystemComma
             } else if (destData.getObjectType() == DataObjectType.TEMPLATE) {
                 return processor.createTemplateFromVolume(cmd);
             }
-        } else if (srcData.getObjectType() == DataObjectType.SNAPSHOT && srcData.getDataStore().getRole() == DataStoreRole.Primary) {
+        } else if (srcData.getObjectType() == DataObjectType.SNAPSHOT && destData.getObjectType() == DataObjectType.SNAPSHOT &&
+                destData.getDataStore().getRole() == DataStoreRole.Primary) {
             return processor.backupSnapshot(cmd);
         } else if (srcData.getObjectType() == DataObjectType.SNAPSHOT && destData.getObjectType() == DataObjectType.VOLUME) {
-            return processor.createVolumeFromSnapshot(cmd);
+            return processor.createVolumeFromSnapshot(cmd);
         } else if (srcData.getObjectType() == DataObjectType.SNAPSHOT && destData.getObjectType() == DataObjectType.TEMPLATE) {
             return processor.createTemplateFromSnapshot(cmd);
         }

         return new Answer(cmd, false, "not implemented yet");
     }
-
-
+
+
     protected Answer execute(CreateObjectCommand cmd) {
         DataTO data = cmd.getData();
         try {
@@ -106,21 +110,21 @@ public class StorageSubsystemCommandHandlerBase implements StorageSubsystemComma
             return new CreateObjectAnswer(e.toString());
         }
     }
-
+
     protected Answer execute(DeleteCommand cmd) {
         DataTO data = cmd.getData();
         Answer answer = null;
         if (data.getObjectType() == DataObjectType.VOLUME) {
             answer = processor.deleteVolume(cmd);
         } else if (data.getObjectType() == DataObjectType.SNAPSHOT) {
-            answer = processor.deleteSnapshot(cmd);
+            answer = processor.deleteSnapshot(cmd);
         } else {
             answer = new Answer(cmd, false, "unsupported type");
         }

         return answer;
     }
-
+
     protected Answer execute(AttachCommand cmd) {
         DiskTO disk = cmd.getDisk();
         if (disk.getType() == Volume.Type.ISO) {
@@ -129,7 +133,7 @@ public class StorageSubsystemCommandHandlerBase implements StorageSubsystemComma
             return processor.attachVolume(cmd);
         }
     }
-
+
     protected Answer execute(DettachCommand cmd) {
         DiskTO disk = cmd.getDisk();
         if (disk.getType() == Volume.Type.ISO) {
diff --git a/core/src/com/cloud/storage/template/S3TemplateDownloader.java b/core/src/com/cloud/storage/template/S3TemplateDownloader.java
index dd595ea3c97..462b21b700b 100644
--- a/core/src/com/cloud/storage/template/S3TemplateDownloader.java
+++ b/core/src/com/cloud/storage/template/S3TemplateDownloader.java
@@ -47,8 +47,6 @@ import com.amazonaws.services.s3.model.ProgressEvent;
 import com.amazonaws.services.s3.model.ProgressListener;
 import com.amazonaws.services.s3.model.PutObjectRequest;
 import com.amazonaws.services.s3.model.StorageClass;
-import com.amazonaws.services.s3.transfer.TransferManager;
-import com.amazonaws.services.s3.transfer.Upload;

 import org.apache.cloudstack.managed.context.ManagedContextRunnable;
 import org.apache.cloudstack.storage.command.DownloadCommand.ResourceType;
@@ -227,9 +225,6 @@ public class S3TemplateDownloader extends ManagedContextRunnable implements Temp
             // compute s3 key
             s3Key = join(asList(installPath, fileName), S3Utils.SEPARATOR);

-            // multi-part upload using S3 api to handle > 5G input stream
-            TransferManager tm = new TransferManager(S3Utils.acquireClient(s3));
-
             // download using S3 API
             ObjectMetadata metadata = new ObjectMetadata();
             metadata.setContentLength(remoteSize);
@@ -262,11 +257,19 @@ public class S3TemplateDownloader extends ManagedContextRunnable implements Temp
                 }
             });

-            // TransferManager processes all transfers asynchronously,
-            // so this call will return immediately.
-            Upload upload = tm.upload(putObjectRequest);
-
-            upload.waitForCompletion();
+
+            if ( s3.isMultipartEnabled()){
+                // use TransferManager to do multipart upload
+                S3Utils.mputObject(s3, putObjectRequest);
+            } else{
+                // single part upload, with 5GB limit in Amazon
+                S3Utils.putObject(s3, putObjectRequest);
+                while (status != TemplateDownloader.Status.DOWNLOAD_FINISHED &&
+                        status != TemplateDownloader.Status.UNRECOVERABLE_ERROR &&
+                        status != TemplateDownloader.Status.ABORTED) {
+                    // wait for completion
+                }
+            }

             // finished or aborted
             Date finish = new Date();
diff --git a/core/src/org/apache/cloudstack/storage/command/ForgetObjectCmd.java b/core/src/org/apache/cloudstack/storage/command/ForgetObjectCmd.java
new file mode 100644
index 00000000000..58fb7802019
--- /dev/null
+++ b/core/src/org/apache/cloudstack/storage/command/ForgetObjectCmd.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.command;
+
+import com.cloud.agent.api.Command;
+import com.cloud.agent.api.to.DataTO;
+
+public class ForgetObjectCmd extends Command implements StorageSubSystemCommand {
+    private DataTO dataTO;
+    public ForgetObjectCmd(DataTO data) {
+        this.dataTO = data;
+    }
+
+    public DataTO getDataTO() {
+        return this.dataTO;
+    }
+    @Override
+    public boolean executeInSequence() {
+        return false;
+    }
+}
diff --git a/core/src/org/apache/cloudstack/storage/command/IntroduceObjectAnswer.java b/core/src/org/apache/cloudstack/storage/command/IntroduceObjectAnswer.java
new file mode 100644
index 00000000000..03c74b8aaa0
--- /dev/null
+++ b/core/src/org/apache/cloudstack/storage/command/IntroduceObjectAnswer.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.command;
+
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.to.DataTO;
+
+public class IntroduceObjectAnswer extends Answer {
+    private DataTO dataTO;
+    public IntroduceObjectAnswer(DataTO dataTO) {
+        this.dataTO = dataTO;
+    }
+
+    public DataTO getDataTO() {
+        return this.dataTO;
+    }
+}
diff --git a/core/src/org/apache/cloudstack/storage/command/IntroduceObjectCmd.java b/core/src/org/apache/cloudstack/storage/command/IntroduceObjectCmd.java
new file mode 100644
index 00000000000..1aabed2d279
--- /dev/null
+++ b/core/src/org/apache/cloudstack/storage/command/IntroduceObjectCmd.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.command;
+
+import com.cloud.agent.api.Command;
+import com.cloud.agent.api.to.DataTO;
+
+public class IntroduceObjectCmd extends Command implements StorageSubSystemCommand {
+    private DataTO dataTO;
+    public IntroduceObjectCmd(DataTO dataTO) {
+        this.dataTO = dataTO;
+    }
+
+    public DataTO getDataTO() {
+        return this.dataTO;
+    }
+
+    @Override
+    public boolean executeInSequence() {
+        return false;
+    }
+}
diff --git a/core/src/org/apache/cloudstack/storage/to/ImageStoreTO.java b/core/src/org/apache/cloudstack/storage/to/ImageStoreTO.java
index 0037ea57242..ec6c24092d3 100644
--- a/core/src/org/apache/cloudstack/storage/to/ImageStoreTO.java
+++ b/core/src/org/apache/cloudstack/storage/to/ImageStoreTO.java
@@ -26,6 +26,7 @@ public class ImageStoreTO implements DataStoreTO {
     private String uri;
     private String providerName;
     private DataStoreRole role;
+    private String uuid;

     public ImageStoreTO() {

@@ -76,4 +77,13 @@ public class ImageStoreTO implements DataStoreTO {
         return new StringBuilder("ImageStoreTO[type=").append(type).append("|provider=").append(providerName)
                 .append("|role=").append(role).append("|uri=").append(uri).append("]").toString();
     }
+
+    @Override
+    public String getUuid() {
+        return uuid;
+    }
+
+    public void setUuid(String uuid) {
+        this.uuid = uuid;
+    }
 }
diff --git a/core/src/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java b/core/src/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java
index 5e870df3716..91d78a49350 100644
--- a/core/src/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java
+++ b/core/src/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java
@@ -46,6 +46,7 @@ public class PrimaryDataStoreTO implements DataStoreTO {
         return this.id;
     }

+    @Override
     public String getUuid() {
         return this.uuid;
     }
diff --git a/core/test/org/apache/cloudstack/api/agent/test/AttachVolumeAnswerTest.java b/core/test/org/apache/cloudstack/api/agent/test/AttachVolumeAnswerTest.java
index 0b2bb1f4f3f..5262d3b78a6 100644
--- a/core/test/org/apache/cloudstack/api/agent/test/AttachVolumeAnswerTest.java
+++ b/core/test/org/apache/cloudstack/api/agent/test/AttachVolumeAnswerTest.java
@@ -27,7 +27,7 @@ import com.cloud.storage.Storage.StoragePoolType;

 public class AttachVolumeAnswerTest {
     AttachVolumeCommand avc = new AttachVolumeCommand(true, false, "vmname",
-            StoragePoolType.Filesystem, "vPath", "vName",
+            StoragePoolType.Filesystem, "vPath", "vName", 1073741824L,
             123456789L, "chainInfo");
     AttachVolumeAnswer ava1 = new AttachVolumeAnswer(avc);
     String results = "";
diff --git a/core/test/org/apache/cloudstack/api/agent/test/AttachVolumeCommandTest.java b/core/test/org/apache/cloudstack/api/agent/test/AttachVolumeCommandTest.java
index 6f413c0268d..1c5caca5f5c 100644
--- a/core/test/org/apache/cloudstack/api/agent/test/AttachVolumeCommandTest.java
+++ b/core/test/org/apache/cloudstack/api/agent/test/AttachVolumeCommandTest.java
@@ -26,7 +26,7 @@ import com.cloud.storage.Storage.StoragePoolType;

 public class AttachVolumeCommandTest {
     AttachVolumeCommand avc = new AttachVolumeCommand(true, false, "vmname",
-            StoragePoolType.Filesystem, "vPath", "vName",
+            StoragePoolType.Filesystem, "vPath", "vName", 1073741824L,
             123456789L, "chainInfo");

     @Test
diff --git a/debian/changelog b/debian/changelog
index dc9c65d2066..d6af31f69dc 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,4 +1,4 @@
-cloudstack (4.3.0) unstable; urgency=low
+cloudstack (4.3.0-snapshot) unstable; urgency=low

   * Update the version to 4.3.0.snapshot

diff --git a/debian/rules b/debian/rules
index 8cae7f3d208..4edf8930605 100755
--- a/debian/rules
+++ b/debian/rules
@@ -12,6 +12,7 @@
 DEBVERS := $(shell dpkg-parsechangelog | sed -n -e 's/^Version: //p')
 VERSION := $(shell echo '$(DEBVERS)' | sed -e 's/^[[:digit:]]*://' -e 's/[~-].*//')
+MVNADD := $(shell if echo '$(DEBVERS)' | grep -q snapshot; then echo -SNAPSHOT; fi )
 PACKAGE = $(shell dh_listpackages|head -n 1|cut -d '-' -f 1)
 SYSCONFDIR = "/etc"
 DESTDIR = "debian/tmp"
@@ -65,8 +66,8 @@ install:
	mkdir $(DESTDIR)/var/log/$(PACKAGE)/agent
	mkdir $(DESTDIR)/usr/share/$(PACKAGE)-agent
	mkdir $(DESTDIR)/usr/share/$(PACKAGE)-agent/plugins
-	install -D agent/target/cloud-agent-$(VERSION)-SNAPSHOT.jar $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib/$(PACKAGE)-agent.jar
-	install -D plugins/hypervisors/kvm/target/cloud-plugin-hypervisor-kvm-$(VERSION)-SNAPSHOT.jar $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib/
+	install -D agent/target/cloud-agent-$(VERSION)$(MVNADD).jar $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib/$(PACKAGE)-agent.jar
+	install -D plugins/hypervisors/kvm/target/cloud-plugin-hypervisor-kvm-$(VERSION)$(MVNADD).jar $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib/
	install -D plugins/hypervisors/kvm/target/dependencies/* $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib/
	install -D packaging/debian/init/cloud-agent $(DESTDIR)/$(SYSCONFDIR)/init.d/$(PACKAGE)-agent
	install -D agent/target/transformed/cloud-setup-agent $(DESTDIR)/usr/bin/cloudstack-setup-agent
@@ -92,7 +93,7 @@ install:
	mkdir $(DESTDIR)/var/lib/$(PACKAGE)/management
	mkdir $(DESTDIR)/var/lib/$(PACKAGE)/mnt
	cp -r client/target/utilities/scripts/db/* $(DESTDIR)/usr/share/$(PACKAGE)-management/setup/
-	cp -r client/target/cloud-client-ui-$(VERSION)-SNAPSHOT/* $(DESTDIR)/usr/share/$(PACKAGE)-management/webapps/client/
+	cp -r client/target/cloud-client-ui-$(VERSION)$(MVNADD)/* $(DESTDIR)/usr/share/$(PACKAGE)-management/webapps/client/
	cp server/target/conf/* $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/server/
	cp client/target/conf/* $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/management/

@@ -145,7 +146,7 @@ install:
	mkdir $(DESTDIR)/var/log/$(PACKAGE)/usage
	mkdir $(DESTDIR)/usr/share/$(PACKAGE)-usage
	mkdir $(DESTDIR)/usr/share/$(PACKAGE)-usage/plugins
-	install -D usage/target/cloud-usage-$(VERSION)-SNAPSHOT.jar $(DESTDIR)/usr/share/$(PACKAGE)-usage/lib/$(PACKAGE)-usage.jar
+	install -D usage/target/cloud-usage-$(VERSION)$(MVNADD).jar $(DESTDIR)/usr/share/$(PACKAGE)-usage/lib/$(PACKAGE)-usage.jar
	install -D usage/target/dependencies/* $(DESTDIR)/usr/share/$(PACKAGE)-usage/lib/
	cp usage/target/transformed/db.properties $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/usage/
	cp usage/target/transformed/log4j-cloud_usage.xml $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/usage/log4j-cloud.xml
@@ -158,7 +159,7 @@ install:
	mkdir -p $(DESTDIR)/usr/share/$(PACKAGE)-bridge/webapps/awsapi
	mkdir $(DESTDIR)/usr/share/$(PACKAGE)-bridge/setup
	ln -s /usr/share/$(PACKAGE)-bridge/webapps/awsapi $(DESTDIR)/usr/share/$(PACKAGE)-management/webapps7080/awsapi
-	cp -r awsapi/target/cloud-awsapi-$(VERSION)-SNAPSHOT/* $(DESTDIR)/usr/share/$(PACKAGE)-bridge/webapps/awsapi
+	cp -r awsapi/target/cloud-awsapi-$(VERSION)$(MVNADD)/* $(DESTDIR)/usr/share/$(PACKAGE)-bridge/webapps/awsapi
	install -D awsapi-setup/setup/cloud-setup-bridge $(DESTDIR)/usr/bin/cloudstack-setup-bridge
	install -D awsapi-setup/setup/cloudstack-aws-api-register $(DESTDIR)/usr/bin/cloudstack-aws-api-register
	cp -r awsapi-setup/db/mysql/* $(DESTDIR)/usr/share/$(PACKAGE)-bridge/setup
diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/EndPointSelector.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/EndPointSelector.java
index ca0cc2c970a..b812f6efd99 100644
--- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/EndPointSelector.java
+++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/EndPointSelector.java
@@ -28,4 +28,6 @@ public interface EndPointSelector {
     EndPoint select(DataStore store);

     List selectAll(DataStore store);
+
+    EndPoint select(Scope scope, Long storeId);
 }
diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotService.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotService.java
index d594a0728cb..e953eb6e21b 100644
--- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotService.java
+++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotService.java
@@ -24,5 +24,5 @@ public interface SnapshotService {

     boolean deleteSnapshot(SnapshotInfo snapshot);

-    boolean revertSnapshot(SnapshotInfo snapshot);
+    boolean revertSnapshot(Long snapshotId);
 }
diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotStrategy.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotStrategy.java
index 86ae532e2dc..47e595be6a6 100644
--- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotStrategy.java
+++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotStrategy.java
@@ -11,7 +11,7 @@
 // Unless required by applicable law or agreed to in writing,
 // software distributed under the License is distributed on an
 // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
+// KIND, either express or implied. See the License for the
 // specific language governing permissions and limitations
 // under the License.
 package org.apache.cloudstack.engine.subsystem.api.storage;
@@ -25,5 +25,7 @@ public interface SnapshotStrategy {

     boolean deleteSnapshot(Long snapshotId);

+    boolean revertSnapshot(Long snapshotId);
+
     boolean canHandle(Snapshot snapshot);
 }
diff --git a/engine/orchestration/src/com/cloud/agent/manager/ClusteredAgentManagerImpl.java b/engine/orchestration/src/com/cloud/agent/manager/ClusteredAgentManagerImpl.java
index 48f096ade22..2b9e5419376 100755
--- a/engine/orchestration/src/com/cloud/agent/manager/ClusteredAgentManagerImpl.java
+++ b/engine/orchestration/src/com/cloud/agent/manager/ClusteredAgentManagerImpl.java
@@ -43,16 +43,13 @@ import javax.naming.ConfigurationException;
 import javax.net.ssl.SSLContext;
 import javax.net.ssl.SSLEngine;

-import org.apache.log4j.Logger;
-
-import com.google.gson.Gson;
-
 import org.apache.cloudstack.framework.config.ConfigDepot;
 import org.apache.cloudstack.framework.config.ConfigKey;
 import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
 import org.apache.cloudstack.managed.context.ManagedContextRunnable;
 import org.apache.cloudstack.managed.context.ManagedContextTimerTask;
 import org.apache.cloudstack.utils.identity.ManagementServerNode;
+import org.apache.log4j.Logger;

 import com.cloud.agent.AgentManager;
 import com.cloud.agent.api.Answer;
@@ -86,7 +83,6 @@ import com.cloud.host.Status.Event;
 import com.cloud.resource.ServerResource;
 import com.cloud.serializer.GsonHelper;
 import com.cloud.utils.DateUtil;
-import com.cloud.utils.Profiler;
 import com.cloud.utils.concurrency.NamedThreadFactory;
 import com.cloud.utils.db.QueryBuilder;
 import com.cloud.utils.db.SearchCriteria.Op;
@@ -94,36 +90,35 @@ import com.cloud.utils.db.Transaction;
 import com.cloud.utils.exception.CloudRuntimeException;
 import com.cloud.utils.nio.Link;
 import com.cloud.utils.nio.Task;
+import com.google.gson.Gson;

 @Local(value = { AgentManager.class, ClusteredAgentRebalanceService.class })
 public class ClusteredAgentManagerImpl extends AgentManagerImpl implements ClusterManagerListener, ClusteredAgentRebalanceService {
     final static Logger s_logger = Logger.getLogger(ClusteredAgentManagerImpl.class);

-    private static final ScheduledExecutorService s_transferExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("Cluster-AgentTransferExecutor"));
+    private static final ScheduledExecutorService s_transferExecutor = Executors.newScheduledThreadPool(2, new NamedThreadFactory("Cluster-AgentRebalancingExecutor"));

     private final long rebalanceTimeOut = 300000; // 5 mins - after this time remove the agent from the transfer list
     public final static long STARTUP_DELAY = 5000;
     public final static long SCAN_INTERVAL = 90000; // 90 seconds, it takes 60 sec for xenserver to fail login
     public final static int ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION = 5; // 5 seconds
     protected Set _agentToTransferIds = new HashSet();
-    Gson _gson;
-
-    @Inject
-    protected ClusterManager _clusterMgr = null;
-
     protected HashMap _peers;
     protected HashMap _sslEngines;
     private final Timer _timer = new Timer("ClusteredAgentManager Timer");
-
+    private final Timer _agentLbTimer = new Timer("ClusteredAgentManager AgentRebalancing Timer");
+    boolean _agentLbHappened = false;
+
+    @Inject
+    protected ClusterManager _clusterMgr = null;
     @Inject
     protected ManagementServerHostDao _mshostDao;
     @Inject
     protected HostTransferMapDao _hostTransferDao;
-
-    // @com.cloud.utils.component.Inject(adapter = AgentLoadBalancerPlanner.class)
-    @Inject protected List _lbPlanners;
-
-    @Inject ConfigurationDao _configDao;
+    @Inject
+    protected List _lbPlanners;
+    @Inject
+    ConfigurationDao _configDao;

     @Inject ConfigDepot _configDepot;
@@ -168,9 +163,10 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
         if (s_logger.isDebugEnabled()) {
             s_logger.debug("Scheduled direct agent scan task to run at an interval of " + ScanInterval.value() + " seconds");
         }
-
-        // schedule transfer scan executor - if agent LB is enabled
+
+        // Schedule tasks for agent rebalancing
         if (isAgentRebalanceEnabled()) {
+            s_transferExecutor.scheduleAtFixedRate(getAgentRebalanceScanTask(), 60000, 60000, TimeUnit.MILLISECONDS);
             s_transferExecutor.scheduleAtFixedRate(getTransferScanTask(), 60000, ClusteredAgentRebalanceService.DEFAULT_TRANSFER_CHECK_INTERVAL, TimeUnit.MILLISECONDS);
         }

@@ -571,6 +567,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
             }
         }
         _timer.cancel();
+        _agentLbTimer.cancel();

         //cancel all transfer tasks
         s_transferExecutor.shutdownNow();
@@ -1354,44 +1351,52 @@
     }

     public boolean rebalanceAgent(long agentId, Event event, long currentOwnerId, long futureOwnerId) throws AgentUnavailableException, OperationTimedoutException {
-        return _rebalanceService.executeRebalanceRequest(agentId, currentOwnerId, futureOwnerId, event);
+        return executeRebalanceRequest(agentId, currentOwnerId, futureOwnerId, event);
     }

     public boolean isAgentRebalanceEnabled() {
         return EnableLB.value();
     }
-
-    private ClusteredAgentRebalanceService _rebalanceService;
-
-    boolean _agentLbHappened = false;
-    public void agentrebalance() {
-        Profiler profilerAgentLB = new Profiler();
-        profilerAgentLB.start();
-        //initiate agent lb task will be scheduled and executed only once, and only when number of agents loaded exceeds _connectedAgentsThreshold
-        if (EnableLB.value() && !_agentLbHappened) {
-            QueryBuilder sc = QueryBuilder.create(HostVO.class);
-            sc.and(sc.entity().getManagementServerId(), Op.NNULL);
-            sc.and(sc.entity().getType(), Op.EQ, Host.Type.Routing);
-            List allManagedRoutingAgents = sc.list();
-
-            sc = QueryBuilder.create(HostVO.class);
-            sc.and(sc.entity().getType(), Op.EQ, Host.Type.Routing);
-            List allAgents = sc.list();
-            double allHostsCount = allAgents.size();
-            double managedHostsCount = allManagedRoutingAgents.size();
-            if (allHostsCount > 0.0) {
-                double load = managedHostsCount / allHostsCount;
-                if (load >= ConnectedAgentThreshold.value()) {
-                    s_logger.debug("Scheduling agent rebalancing task as the average agent load " + load + " is more than the threshold " + ConnectedAgentThreshold.value());
-                    _rebalanceService.scheduleRebalanceAgents();
-                    _agentLbHappened = true;
-                } else {
-                    s_logger.trace("Not scheduling agent rebalancing task as the averages load " + load + " is less than the threshold " + ConnectedAgentThreshold.value());
+
+
+    private Runnable getAgentRebalanceScanTask() {
+        return new ManagedContextRunnable() {
+            @Override
+            protected void runInContext() {
+                try {
+                    if (s_logger.isTraceEnabled()) {
+                        s_logger.trace("Agent rebalance task check, management server id:" + _nodeId);
                     }
+                    //initiate agent lb task will be scheduled and executed only once, and only when number of agents loaded exceeds _connectedAgentsThreshold
+                    if (!_agentLbHappened) {
+                        QueryBuilder sc = QueryBuilder.create(HostVO.class);
+                        sc.and(sc.entity().getManagementServerId(), Op.NNULL);
+                        sc.and(sc.entity().getType(), Op.EQ, Host.Type.Routing);
+                        List allManagedRoutingAgents = sc.list();
+
+                        sc = QueryBuilder.create(HostVO.class);
+                        sc.and(sc.entity().getType(), Op.EQ, Host.Type.Routing);
+                        List allAgents = sc.list();
+                        double allHostsCount = allAgents.size();
+                        double managedHostsCount = allManagedRoutingAgents.size();
+                        if (allHostsCount > 0.0) {
+                            double load = managedHostsCount / allHostsCount;
+                            if (load >= ConnectedAgentThreshold.value()) {
+                                s_logger.debug("Scheduling agent rebalancing task as the average agent load " + load + " is more than the threshold " + ConnectedAgentThreshold.value());
+                                scheduleRebalanceAgents();
+                                _agentLbHappened = true;
+                            } else {
+                                s_logger.debug("Not scheduling agent rebalancing task as the averages load " + load + " is less than the threshold " + ConnectedAgentThreshold.value());
+                            }
+                        }
+                    }
+                } catch (Throwable e) {
+                    s_logger.error("Problem with the clustered agent transfer scan check!", e);
                 }
             }
-            profilerAgentLB.stop();
-        }
+        };
+}
+
     @Override
     public void rescan() {
diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostDaoImpl.java b/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostDaoImpl.java
index 4bbc601c8b9..8debe2e4c1a 100644
--- a/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostDaoImpl.java
+++ b/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostDaoImpl.java
@@ -60,7 +60,7 @@ import com.cloud.utils.exception.CloudRuntimeException;

 @Component(value="EngineHostDao")
 @Local(value = { EngineHostDao.class })
-@DB(txn = false)
+@DB
 @TableGenerator(name = "host_req_sq", table = "op_host", pkColumnName = "id", valueColumnName = "sequence", allocationSize = 1)
 public class EngineHostDaoImpl extends GenericDaoBase implements EngineHostDao {
     private static final Logger s_logger = Logger.getLogger(EngineHostDaoImpl.class);
diff --git a/engine/schema/src/com/cloud/certificate/dao/CertificateDaoImpl.java b/engine/schema/src/com/cloud/certificate/dao/CertificateDaoImpl.java
index f071cea60e6..fdd5a287b40 100644
--- a/engine/schema/src/com/cloud/certificate/dao/CertificateDaoImpl.java
+++ b/engine/schema/src/com/cloud/certificate/dao/CertificateDaoImpl.java
@@ -17,9 +17,6 @@
 package com.cloud.certificate.dao;

 import java.io.BufferedInputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
 import java.io.IOException;

 import javax.ejb.Local;
@@ -32,7 +29,7 @@ import com.cloud.utils.db.DB;
 import com.cloud.utils.db.GenericDaoBase;

 @Component
-@Local(value={CertificateDao.class}) @DB(txn=false)
+@Local(value={CertificateDao.class}) @DB
 public class CertificateDaoImpl extends GenericDaoBase implements CertificateDao {
     private static final Logger s_logger = Logger.getLogger(CertificateDaoImpl.class);
diff --git a/engine/schema/src/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java b/engine/schema/src/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java
index cff4cfc1b95..8a43d23fedb 100644
--- a/engine/schema/src/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java
+++ b/engine/schema/src/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java
@@ -34,7 +34,7 @@ import com.cloud.utils.db.SearchCriteria;

 @Component
 @Local(value = { HostTransferMapDao.class })
-@DB(txn = false)
+@DB
 public class HostTransferMapDaoImpl extends GenericDaoBase implements HostTransferMapDao {
     private static final Logger s_logger = Logger.getLogger(HostTransferMapDaoImpl.class);

diff --git a/engine/schema/src/com/cloud/dc/dao/ClusterVSMMapDaoImpl.java b/engine/schema/src/com/cloud/dc/dao/ClusterVSMMapDaoImpl.java
index b12fa9dc007..c3d3f2a9352 100644
--- a/engine/schema/src/com/cloud/dc/dao/ClusterVSMMapDaoImpl.java
+++ b/engine/schema/src/com/cloud/dc/dao/ClusterVSMMapDaoImpl.java
@@ -30,7 +30,7 @@ import com.cloud.utils.db.Transaction;

 @Component
 @Local(value=ClusterVSMMapDao.class)
-@DB(txn = false)
+@DB
 public class ClusterVSMMapDaoImpl extends GenericDaoBase implements ClusterVSMMapDao {

     final SearchBuilder ClusterSearch;
diff --git a/engine/schema/src/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java b/engine/schema/src/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java
index 353402d30cf..0ec2bb50c7b 100755
--- a/engine/schema/src/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java
+++ b/engine/schema/src/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java
@@ -38,7 +38,7 @@ import com.cloud.utils.exception.CloudRuntimeException;
 import com.cloud.utils.net.NetUtils;

 @Component
-@Local(value={DataCenterIpAddressDao.class}) @DB(txn=false)
+@Local(value={DataCenterIpAddressDao.class}) @DB
 public class DataCenterIpAddressDaoImpl extends GenericDaoBase implements DataCenterIpAddressDao {
     private static final Logger s_logger = Logger.getLogger(DataCenterIpAddressDaoImpl.class);

diff --git a/engine/schema/src/com/cloud/dc/dao/DataCenterLinkLocalIpAddressDaoImpl.java b/engine/schema/src/com/cloud/dc/dao/DataCenterLinkLocalIpAddressDaoImpl.java
index b52e3733a3f..6baf04fb8f9 100644
--- a/engine/schema/src/com/cloud/dc/dao/DataCenterLinkLocalIpAddressDaoImpl.java
+++ b/engine/schema/src/com/cloud/dc/dao/DataCenterLinkLocalIpAddressDaoImpl.java
@@ -30,7 +30,6 @@ import org.springframework.stereotype.Component;

 import com.cloud.dc.DataCenterLinkLocalIpAddressVO;
 import com.cloud.utils.db.DB;
-import com.cloud.utils.db.GenericDao;
 import com.cloud.utils.db.GenericDaoBase;
 import com.cloud.utils.db.GenericSearchBuilder;
 import com.cloud.utils.db.SearchBuilder;
@@ -41,7 +40,7 @@ import com.cloud.utils.exception.CloudRuntimeException;
 import com.cloud.utils.net.NetUtils;

 @Component
-@Local(value={DataCenterLinkLocalIpAddressDaoImpl.class}) @DB(txn=false)
+@Local(value={DataCenterLinkLocalIpAddressDaoImpl.class}) @DB
 public class DataCenterLinkLocalIpAddressDaoImpl extends GenericDaoBase implements DataCenterLinkLocalIpAddressDao {
     private static final Logger s_logger = Logger.getLogger(DataCenterLinkLocalIpAddressDaoImpl.class);

diff --git a/engine/schema/src/com/cloud/dc/dao/DataCenterVnetDaoImpl.java b/engine/schema/src/com/cloud/dc/dao/DataCenterVnetDaoImpl.java
index d3a2409dc96..c052026eb2d 100755
--- a/engine/schema/src/com/cloud/dc/dao/DataCenterVnetDaoImpl.java
+++ b/engine/schema/src/com/cloud/dc/dao/DataCenterVnetDaoImpl.java
@@ -25,14 +25,12 @@ import java.util.Map;

 import javax.inject.Inject;
 import javax.naming.ConfigurationException;
-import com.cloud.exception.InvalidParameterValueException;
 import org.springframework.stereotype.Component;

 import com.cloud.dc.DataCenterVnetVO;
 import com.cloud.network.dao.AccountGuestVlanMapDao;
 import com.cloud.network.dao.AccountGuestVlanMapVO;
 import com.cloud.utils.db.DB;
-import com.cloud.utils.db.GenericDao;
 import com.cloud.utils.db.GenericDaoBase;
 import com.cloud.utils.db.GenericSearchBuilder;
 import com.cloud.utils.db.JoinBuilder;
@@ -48,7 +46,7 @@ import com.cloud.utils.exception.CloudRuntimeException;
 * data center/physical_network and the vnet that appears within the physical network.
 */
 @Component
-@DB(txn=false)
+@DB
 public class DataCenterVnetDaoImpl extends GenericDaoBase implements DataCenterVnetDao {

     private final SearchBuilder FreeVnetSearch;
diff --git a/engine/schema/src/com/cloud/dc/dao/StorageNetworkIpAddressDaoImpl.java b/engine/schema/src/com/cloud/dc/dao/StorageNetworkIpAddressDaoImpl.java
index e78533f7821..a7bb9c1c005 100755
--- a/engine/schema/src/com/cloud/dc/dao/StorageNetworkIpAddressDaoImpl.java
+++ b/engine/schema/src/com/cloud/dc/dao/StorageNetworkIpAddressDaoImpl.java
@@ -18,17 +18,13 @@ package com.cloud.dc.dao;

 import java.util.Date;
 import java.util.List;
-import java.util.Map;

 import javax.ejb.Local;
-import javax.naming.ConfigurationException;

 import org.springframework.stereotype.Component;

-import com.cloud.dc.DataCenterIpAddressVO;
 import com.cloud.dc.StorageNetworkIpAddressVO;
 import com.cloud.utils.db.DB;
-import com.cloud.utils.db.Filter;
 import com.cloud.utils.db.GenericDaoBase;
 import com.cloud.utils.db.GenericSearchBuilder;
 import com.cloud.utils.db.SearchBuilder;
@@ -37,10 +33,11 @@ import com.cloud.utils.db.GenericQueryBuilder;
 import com.cloud.utils.db.Transaction;
 import com.cloud.utils.db.SearchCriteria.Func;
 import com.cloud.utils.db.SearchCriteria.Op;
+import com.cloud.utils.db.Transaction;

 @Component
 @Local(value={StorageNetworkIpAddressDao.class})
-@DB(txn=false)
+@DB
 public class StorageNetworkIpAddressDaoImpl extends GenericDaoBase implements StorageNetworkIpAddressDao {
     protected final GenericSearchBuilder countInUserIp;
     protected final GenericSearchBuilder listInUseIp;
diff --git a/engine/schema/src/com/cloud/dc/dao/StorageNetworkIpRangeDaoImpl.java b/engine/schema/src/com/cloud/dc/dao/StorageNetworkIpRangeDaoImpl.java
index e1605d56334..517099a1c63 100755
--- a/engine/schema/src/com/cloud/dc/dao/StorageNetworkIpRangeDaoImpl.java
+++ b/engine/schema/src/com/cloud/dc/dao/StorageNetworkIpRangeDaoImpl.java
@@ -33,7 +33,7 @@ import com.cloud.utils.db.SearchCriteria.Op;

 @Component
 @Local(value={StorageNetworkIpRangeDao.class})
-@DB(txn=false)
+@DB
 public class StorageNetworkIpRangeDaoImpl extends GenericDaoBase implements StorageNetworkIpRangeDao {

     protected final GenericSearchBuilder countRanges;
diff --git a/engine/schema/src/com/cloud/host/dao/HostDaoImpl.java b/engine/schema/src/com/cloud/host/dao/HostDaoImpl.java
index 47168102a56..caf7c014bd0 100755
--- a/engine/schema/src/com/cloud/host/dao/HostDaoImpl.java
+++ b/engine/schema/src/com/cloud/host/dao/HostDaoImpl.java
@@ -65,7 +65,7 @@ import com.cloud.utils.exception.CloudRuntimeException;

 @Component
 @Local(value = {HostDao.class})
-@DB(txn = false)
+@DB
 @TableGenerator(name = "host_req_sq", table = "op_host", pkColumnName = "id", valueColumnName = "sequence", allocationSize = 1)
 public class HostDaoImpl extends GenericDaoBase implements HostDao { //FIXME: , ExternalIdDao {
     private static final Logger s_logger = Logger.getLogger(HostDaoImpl.class);
diff --git a/engine/schema/src/com/cloud/network/dao/AccountGuestVlanMapDaoImpl.java b/engine/schema/src/com/cloud/network/dao/AccountGuestVlanMapDaoImpl.java
index e7a7b34d9bd..85f37c9c8f0 100644
--- a/engine/schema/src/com/cloud/network/dao/AccountGuestVlanMapDaoImpl.java
+++ b/engine/schema/src/com/cloud/network/dao/AccountGuestVlanMapDaoImpl.java
@@ -30,7 +30,7 @@ import com.cloud.utils.db.SearchCriteria;

 @Component
 @Local(value={AccountGuestVlanMapDao.class})
-@DB(txn=false)
+@DB
 public class AccountGuestVlanMapDaoImpl extends GenericDaoBase implements AccountGuestVlanMapDao {

     protected SearchBuilder AccountSearch;
diff --git a/engine/schema/src/com/cloud/network/dao/ExternalFirewallDeviceDaoImpl.java b/engine/schema/src/com/cloud/network/dao/ExternalFirewallDeviceDaoImpl.java
index 01f8861f9d1..f0e40c1512b 100644
--- a/engine/schema/src/com/cloud/network/dao/ExternalFirewallDeviceDaoImpl.java
+++ b/engine/schema/src/com/cloud/network/dao/ExternalFirewallDeviceDaoImpl.java
@@ -30,7 +30,7 @@ import com.cloud.utils.db.SearchCriteria;
 import com.cloud.utils.db.SearchCriteria.Op;

 @Component
-@Local(value=ExternalFirewallDeviceDao.class) @DB(txn=false)
+@Local(value=ExternalFirewallDeviceDao.class) @DB
 public class ExternalFirewallDeviceDaoImpl extends GenericDaoBase implements ExternalFirewallDeviceDao {
     final SearchBuilder physicalNetworkServiceProviderSearch;
     final SearchBuilder physicalNetworkIdSearch;
diff --git a/engine/schema/src/com/cloud/network/dao/ExternalLoadBalancerDeviceDaoImpl.java b/engine/schema/src/com/cloud/network/dao/ExternalLoadBalancerDeviceDaoImpl.java
index ea6437dc2c3..e8ef0d22420 100644
--- a/engine/schema/src/com/cloud/network/dao/ExternalLoadBalancerDeviceDaoImpl.java
+++ b/engine/schema/src/com/cloud/network/dao/ExternalLoadBalancerDeviceDaoImpl.java
@@ -29,7 +29,7 @@ import javax.ejb.Local;
 import java.util.List;

 @Component
-@Local(value=ExternalLoadBalancerDeviceDao.class) @DB(txn=false)
+@Local(value=ExternalLoadBalancerDeviceDao.class) @DB
 public class ExternalLoadBalancerDeviceDaoImpl extends GenericDaoBase implements ExternalLoadBalancerDeviceDao {
     final SearchBuilder physicalNetworkIdSearch;
     final SearchBuilder physicalNetworkServiceProviderSearch;
diff --git a/engine/schema/src/com/cloud/network/dao/FirewallRulesDaoImpl.java b/engine/schema/src/com/cloud/network/dao/FirewallRulesDaoImpl.java
index 41f911ca1d1..cd7878f08c8 100644
--- a/engine/schema/src/com/cloud/network/dao/FirewallRulesDaoImpl.java
+++ b/engine/schema/src/com/cloud/network/dao/FirewallRulesDaoImpl.java
@@ -31,7 +31,6 @@ import com.cloud.network.rules.FirewallRule.TrafficType;
 import com.cloud.network.rules.FirewallRuleVO;
 import com.cloud.server.ResourceTag.TaggedResourceType;
 import com.cloud.tags.dao.ResourceTagDao;
-import com.cloud.tags.dao.ResourceTagsDaoImpl;
 import com.cloud.utils.db.DB;
 import com.cloud.utils.db.GenericDaoBase;
 import com.cloud.utils.db.GenericSearchBuilder;
@@ -44,7 +43,7 @@ import com.cloud.utils.db.Transaction;

 @Component
 @Local(value = FirewallRulesDao.class)
-@DB(txn = false)
+@DB
 public class FirewallRulesDaoImpl extends GenericDaoBase implements FirewallRulesDao {

     protected final SearchBuilder AllFieldsSearch;
diff --git a/engine/schema/src/com/cloud/network/dao/NetworkDaoImpl.java b/engine/schema/src/com/cloud/network/dao/NetworkDaoImpl.java
index 23ccba9b03a..72b357d659c 100644
--- a/engine/schema/src/com/cloud/network/dao/NetworkDaoImpl.java
+++ b/engine/schema/src/com/cloud/network/dao/NetworkDaoImpl.java
@@ -40,10 +40,8 @@ import com.cloud.network.Networks.TrafficType;
 import com.cloud.offering.NetworkOffering;
 import com.cloud.offerings.NetworkOfferingVO;
 import com.cloud.offerings.dao.NetworkOfferingDao;
-import com.cloud.offerings.dao.NetworkOfferingDaoImpl;
 import com.cloud.server.ResourceTag.TaggedResourceType;
 import com.cloud.tags.dao.ResourceTagDao;
-import com.cloud.tags.dao.ResourceTagsDaoImpl;
 import com.cloud.utils.db.*;
 import com.cloud.utils.db.JoinBuilder.JoinType;
 import com.cloud.utils.db.SearchCriteria.Func;
@@ -52,7 +50,7 @@ import com.cloud.utils.net.NetUtils;

 @Component
 @Local(value = NetworkDao.class)
-@DB(txn = false)
+@DB()
 public class NetworkDaoImpl extends GenericDaoBase implements NetworkDao {
     SearchBuilder AllFieldsSearch;
     SearchBuilder AccountSearch;
diff --git a/engine/schema/src/com/cloud/network/dao/NetworkDomainDaoImpl.java b/engine/schema/src/com/cloud/network/dao/NetworkDomainDaoImpl.java
index bbb920337c8..882c7fecdb4 100644
--- a/engine/schema/src/com/cloud/network/dao/NetworkDomainDaoImpl.java
+++ b/engine/schema/src/com/cloud/network/dao/NetworkDomainDaoImpl.java
@@ -30,7 +30,7 @@ import com.cloud.utils.db.SearchCriteria;
 import com.cloud.utils.db.SearchCriteria.Op;

 @Component
-@Local(value=NetworkDomainDao.class) @DB(txn=false)
+@Local(value=NetworkDomainDao.class) @DB()
 public class NetworkDomainDaoImpl extends GenericDaoBase implements NetworkDomainDao {
     final SearchBuilder AllFieldsSearch;
     final SearchBuilder DomainsSearch;
diff --git a/engine/schema/src/com/cloud/network/dao/NetworkExternalFirewallDaoImpl.java b/engine/schema/src/com/cloud/network/dao/NetworkExternalFirewallDaoImpl.java
index b1767609429..9a2bd76b314 100644
--- a/engine/schema/src/com/cloud/network/dao/NetworkExternalFirewallDaoImpl.java
+++ b/engine/schema/src/com/cloud/network/dao/NetworkExternalFirewallDaoImpl.java
@@ -30,7 +30,7 @@ import com.cloud.utils.db.SearchCriteria;
 import com.cloud.utils.db.SearchCriteria.Op;

 @Component
-@Local(value=NetworkExternalFirewallDao.class) @DB(txn=false)
+@Local(value=NetworkExternalFirewallDao.class) @DB()
 public class NetworkExternalFirewallDaoImpl extends GenericDaoBase implements NetworkExternalFirewallDao {

     final SearchBuilder networkIdSearch;
diff --git a/engine/schema/src/com/cloud/network/dao/NetworkExternalLoadBalancerDaoImpl.java b/engine/schema/src/com/cloud/network/dao/NetworkExternalLoadBalancerDaoImpl.java
index c29c164fd28..8d77a5b73c8 100644
--- a/engine/schema/src/com/cloud/network/dao/NetworkExternalLoadBalancerDaoImpl.java
+++ b/engine/schema/src/com/cloud/network/dao/NetworkExternalLoadBalancerDaoImpl.java
@@ -29,7 +29,7 @@ import com.cloud.utils.db.SearchCriteria;
 import com.cloud.utils.db.SearchCriteria.Op;

 @Component
-@Local(value=NetworkExternalLoadBalancerDao.class) @DB(txn=false)
+@Local(value=NetworkExternalLoadBalancerDao.class) @DB()
 public class NetworkExternalLoadBalancerDaoImpl extends GenericDaoBase implements NetworkExternalLoadBalancerDao {

     final SearchBuilder networkIdSearch;
diff --git a/engine/schema/src/com/cloud/network/dao/NetworkServiceMapDaoImpl.java b/engine/schema/src/com/cloud/network/dao/NetworkServiceMapDaoImpl.java
index a4ebd8c1b45..d1402a9e942 100644
--- a/engine/schema/src/com/cloud/network/dao/NetworkServiceMapDaoImpl.java
+++ b/engine/schema/src/com/cloud/network/dao/NetworkServiceMapDaoImpl.java
@@ -33,7 +33,7 @@ import com.cloud.utils.db.SearchBuilder;
 import com.cloud.utils.db.SearchCriteria;

 @Component
-@Local(value=NetworkServiceMapDao.class) @DB(txn=false)
+@Local(value=NetworkServiceMapDao.class) @DB()
 public class NetworkServiceMapDaoImpl extends GenericDaoBase implements NetworkServiceMapDao {
     final SearchBuilder AllFieldsSearch;
     final SearchBuilder MultipleServicesSearch;
diff --git a/engine/schema/src/com/cloud/network/dao/PhysicalNetworkDaoImpl.java b/engine/schema/src/com/cloud/network/dao/PhysicalNetworkDaoImpl.java
index 1e26a51cead..8be29a436d2 100644
--- a/engine/schema/src/com/cloud/network/dao/PhysicalNetworkDaoImpl.java
+++ b/engine/schema/src/com/cloud/network/dao/PhysicalNetworkDaoImpl.java
@@ -32,7 +32,7 @@ import com.cloud.utils.db.SearchCriteria;
 import com.cloud.utils.db.SearchCriteria.Op;

 @Component
-@Local(value=PhysicalNetworkDao.class) @DB(txn=false) +@Local(value=PhysicalNetworkDao.class) @DB() public class PhysicalNetworkDaoImpl extends GenericDaoBase implements PhysicalNetworkDao { final SearchBuilder ZoneSearch; diff --git a/engine/schema/src/com/cloud/network/dao/PhysicalNetworkServiceProviderDaoImpl.java b/engine/schema/src/com/cloud/network/dao/PhysicalNetworkServiceProviderDaoImpl.java index 16a23dd8fc9..bfe7d25e068 100644 --- a/engine/schema/src/com/cloud/network/dao/PhysicalNetworkServiceProviderDaoImpl.java +++ b/engine/schema/src/com/cloud/network/dao/PhysicalNetworkServiceProviderDaoImpl.java @@ -31,7 +31,7 @@ import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; @Component -@Local(value=PhysicalNetworkServiceProviderDao.class) @DB(txn=false) +@Local(value=PhysicalNetworkServiceProviderDao.class) @DB() public class PhysicalNetworkServiceProviderDaoImpl extends GenericDaoBase implements PhysicalNetworkServiceProviderDao { final SearchBuilder physicalNetworkSearch; final SearchBuilder physicalNetworkServiceProviderSearch; diff --git a/engine/schema/src/com/cloud/network/dao/PhysicalNetworkTrafficTypeDaoImpl.java b/engine/schema/src/com/cloud/network/dao/PhysicalNetworkTrafficTypeDaoImpl.java index 910616cb5bf..2a712a6127d 100755 --- a/engine/schema/src/com/cloud/network/dao/PhysicalNetworkTrafficTypeDaoImpl.java +++ b/engine/schema/src/com/cloud/network/dao/PhysicalNetworkTrafficTypeDaoImpl.java @@ -33,7 +33,7 @@ import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; @Component -@Local(value=PhysicalNetworkTrafficTypeDao.class) @DB(txn=false) +@Local(value=PhysicalNetworkTrafficTypeDao.class) @DB() public class PhysicalNetworkTrafficTypeDaoImpl extends GenericDaoBase implements PhysicalNetworkTrafficTypeDao { final SearchBuilder physicalNetworkSearch; final GenericSearchBuilder kvmAllFieldsSearch; diff --git a/engine/schema/src/com/cloud/network/dao/PortProfileDaoImpl.java b/engine/schema/src/com/cloud/network/dao/PortProfileDaoImpl.java index 61fe52a23bc..2325c4a9705 100644 --- a/engine/schema/src/com/cloud/network/dao/PortProfileDaoImpl.java +++ b/engine/schema/src/com/cloud/network/dao/PortProfileDaoImpl.java @@ -34,7 +34,7 @@ import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.exception.CloudRuntimeException; @Component -@Local(value=PortProfileDao.class) @DB(txn=false) +@Local(value=PortProfileDao.class) @DB() public class PortProfileDaoImpl extends GenericDaoBase implements PortProfileDao { protected static final Logger s_logger = Logger.getLogger(PortProfileDaoImpl.class); diff --git a/engine/schema/src/com/cloud/network/dao/VirtualRouterProviderDaoImpl.java b/engine/schema/src/com/cloud/network/dao/VirtualRouterProviderDaoImpl.java index 1c5d27e265f..8dce4e42ea7 100644 --- a/engine/schema/src/com/cloud/network/dao/VirtualRouterProviderDaoImpl.java +++ b/engine/schema/src/com/cloud/network/dao/VirtualRouterProviderDaoImpl.java @@ -30,7 +30,7 @@ import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; @Component -@Local(value=VirtualRouterProviderDao.class) @DB(txn=false) +@Local(value=VirtualRouterProviderDao.class) @DB() public class VirtualRouterProviderDaoImpl extends GenericDaoBase implements VirtualRouterProviderDao { final SearchBuilder AllFieldsSearch; diff --git a/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLDaoImpl.java b/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLDaoImpl.java index fd3308d176f..5a2eeede193 100644 --- 
a/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLDaoImpl.java +++ b/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLDaoImpl.java @@ -19,14 +19,13 @@ package com.cloud.network.vpc.dao; import com.cloud.network.vpc.NetworkACLVO; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; -import com.cloud.utils.db.SearchBuilder; import org.springframework.stereotype.Component; import javax.ejb.Local; @Component @Local(value = NetworkACLDao.class) -@DB(txn = false) +@DB() public class NetworkACLDaoImpl extends GenericDaoBase implements NetworkACLDao{ protected NetworkACLDaoImpl() { diff --git a/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLItemDaoImpl.java b/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLItemDaoImpl.java index 8162ce85ca1..6bd47bd679f 100644 --- a/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLItemDaoImpl.java +++ b/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLItemDaoImpl.java @@ -28,7 +28,7 @@ import java.util.List; @Component @Local(value = NetworkACLItemDao.class) -@DB(txn = false) +@DB() public class NetworkACLItemDaoImpl extends GenericDaoBase implements NetworkACLItemDao { protected final SearchBuilder AllFieldsSearch; diff --git a/engine/schema/src/com/cloud/network/vpc/dao/PrivateIpDaoImpl.java b/engine/schema/src/com/cloud/network/vpc/dao/PrivateIpDaoImpl.java index fe435c05175..5ed7fb2f757 100644 --- a/engine/schema/src/com/cloud/network/vpc/dao/PrivateIpDaoImpl.java +++ b/engine/schema/src/com/cloud/network/vpc/dao/PrivateIpDaoImpl.java @@ -36,7 +36,7 @@ import com.cloud.utils.db.Transaction; @Component @Local(value = PrivateIpDao.class) -@DB(txn = false) +@DB() public class PrivateIpDaoImpl extends GenericDaoBase implements PrivateIpDao { private static final Logger s_logger = Logger.getLogger(PrivateIpDaoImpl.class); diff --git a/engine/schema/src/com/cloud/network/vpc/dao/StaticRouteDaoImpl.java b/engine/schema/src/com/cloud/network/vpc/dao/StaticRouteDaoImpl.java index 518237d96b3..e7e006e852e 100644 --- a/engine/schema/src/com/cloud/network/vpc/dao/StaticRouteDaoImpl.java +++ b/engine/schema/src/com/cloud/network/vpc/dao/StaticRouteDaoImpl.java @@ -40,7 +40,7 @@ import com.cloud.utils.db.Transaction; @Component @Local(value = StaticRouteDao.class) -@DB(txn = false) +@DB() public class StaticRouteDaoImpl extends GenericDaoBase implements StaticRouteDao{ protected final SearchBuilder AllFieldsSearch; protected final SearchBuilder NotRevokedSearch; diff --git a/engine/schema/src/com/cloud/network/vpc/dao/VpcDaoImpl.java b/engine/schema/src/com/cloud/network/vpc/dao/VpcDaoImpl.java index 12868fef8ba..289b896899a 100644 --- a/engine/schema/src/com/cloud/network/vpc/dao/VpcDaoImpl.java +++ b/engine/schema/src/com/cloud/network/vpc/dao/VpcDaoImpl.java @@ -42,7 +42,7 @@ import com.cloud.utils.db.Transaction; @Component @Local(value = VpcDao.class) -@DB(txn = false) +@DB() public class VpcDaoImpl extends GenericDaoBase implements VpcDao{ final GenericSearchBuilder CountByOfferingId; final SearchBuilder AllFieldsSearch; diff --git a/engine/schema/src/com/cloud/network/vpc/dao/VpcGatewayDaoImpl.java b/engine/schema/src/com/cloud/network/vpc/dao/VpcGatewayDaoImpl.java index 13c37c4e0e6..e718209529d 100644 --- a/engine/schema/src/com/cloud/network/vpc/dao/VpcGatewayDaoImpl.java +++ b/engine/schema/src/com/cloud/network/vpc/dao/VpcGatewayDaoImpl.java @@ -31,7 +31,7 @@ import java.util.List; @Component @Local(value = VpcGatewayDao.class) -@DB(txn = false) +@DB() public class VpcGatewayDaoImpl extends GenericDaoBase 
implements VpcGatewayDao{ protected final SearchBuilder AllFieldsSearch; diff --git a/engine/schema/src/com/cloud/network/vpc/dao/VpcOfferingDaoImpl.java b/engine/schema/src/com/cloud/network/vpc/dao/VpcOfferingDaoImpl.java index 2cda5471c14..b5a7e4dd7bf 100644 --- a/engine/schema/src/com/cloud/network/vpc/dao/VpcOfferingDaoImpl.java +++ b/engine/schema/src/com/cloud/network/vpc/dao/VpcOfferingDaoImpl.java @@ -30,7 +30,7 @@ import com.cloud.utils.db.Transaction; @Component @Local(value = VpcOfferingDao.class) -@DB(txn = false) +@DB() public class VpcOfferingDaoImpl extends GenericDaoBase implements VpcOfferingDao{ final SearchBuilder AllFieldsSearch; diff --git a/engine/schema/src/com/cloud/network/vpc/dao/VpcOfferingServiceMapDaoImpl.java b/engine/schema/src/com/cloud/network/vpc/dao/VpcOfferingServiceMapDaoImpl.java index 4b5f1b9620b..8b503c9146d 100644 --- a/engine/schema/src/com/cloud/network/vpc/dao/VpcOfferingServiceMapDaoImpl.java +++ b/engine/schema/src/com/cloud/network/vpc/dao/VpcOfferingServiceMapDaoImpl.java @@ -33,7 +33,7 @@ import com.cloud.utils.db.SearchCriteria.Func; @Component @Local(value = VpcOfferingServiceMapDao.class) -@DB(txn = false) +@DB() public class VpcOfferingServiceMapDaoImpl extends GenericDaoBase implements VpcOfferingServiceMapDao{ final SearchBuilder AllFieldsSearch; final SearchBuilder MultipleServicesSearch; diff --git a/engine/schema/src/com/cloud/network/vpc/dao/VpcServiceMapDaoImpl.java b/engine/schema/src/com/cloud/network/vpc/dao/VpcServiceMapDaoImpl.java index 41e8d912e34..227694fe159 100644 --- a/engine/schema/src/com/cloud/network/vpc/dao/VpcServiceMapDaoImpl.java +++ b/engine/schema/src/com/cloud/network/vpc/dao/VpcServiceMapDaoImpl.java @@ -33,7 +33,7 @@ import com.cloud.utils.db.SearchCriteria; import org.springframework.stereotype.Component; @Component -@Local(value=VpcServiceMapDao.class) @DB(txn=false) +@Local(value=VpcServiceMapDao.class) @DB() public class VpcServiceMapDaoImpl extends GenericDaoBase implements VpcServiceMapDao { final SearchBuilder AllFieldsSearch; final SearchBuilder MultipleServicesSearch; diff --git a/engine/schema/src/com/cloud/offerings/dao/NetworkOfferingDaoImpl.java b/engine/schema/src/com/cloud/offerings/dao/NetworkOfferingDaoImpl.java index 84324734072..1c0a1d74743 100644 --- a/engine/schema/src/com/cloud/offerings/dao/NetworkOfferingDaoImpl.java +++ b/engine/schema/src/com/cloud/offerings/dao/NetworkOfferingDaoImpl.java @@ -42,7 +42,7 @@ import com.cloud.utils.db.Transaction; @Component @Local(value = NetworkOfferingDao.class) -@DB(txn = false) +@DB() public class NetworkOfferingDaoImpl extends GenericDaoBase implements NetworkOfferingDao { final SearchBuilder NameSearch; final SearchBuilder SystemOfferingSearch; diff --git a/engine/schema/src/com/cloud/offerings/dao/NetworkOfferingServiceMapDaoImpl.java b/engine/schema/src/com/cloud/offerings/dao/NetworkOfferingServiceMapDaoImpl.java index d2fbfe96319..6694eb826b9 100644 --- a/engine/schema/src/com/cloud/offerings/dao/NetworkOfferingServiceMapDaoImpl.java +++ b/engine/schema/src/com/cloud/offerings/dao/NetworkOfferingServiceMapDaoImpl.java @@ -34,7 +34,7 @@ import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Func; @Component -@Local(value=NetworkOfferingServiceMapDao.class) @DB(txn=false) +@Local(value=NetworkOfferingServiceMapDao.class) @DB() public class NetworkOfferingServiceMapDaoImpl extends GenericDaoBase implements NetworkOfferingServiceMapDao { final SearchBuilder AllFieldsSearch; diff --git 
a/engine/schema/src/com/cloud/service/dao/ServiceOfferingDaoImpl.java b/engine/schema/src/com/cloud/service/dao/ServiceOfferingDaoImpl.java index 14b2abf8fc4..b7890a41af9 100644 --- a/engine/schema/src/com/cloud/service/dao/ServiceOfferingDaoImpl.java +++ b/engine/schema/src/com/cloud/service/dao/ServiceOfferingDaoImpl.java @@ -34,7 +34,7 @@ import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; @Component -@Local(value={ServiceOfferingDao.class}) @DB(txn=false) +@Local(value={ServiceOfferingDao.class}) @DB() public class ServiceOfferingDaoImpl extends GenericDaoBase implements ServiceOfferingDao { protected static final Logger s_logger = Logger.getLogger(ServiceOfferingDaoImpl.class); diff --git a/engine/schema/src/com/cloud/storage/dao/StoragePoolWorkDaoImpl.java b/engine/schema/src/com/cloud/storage/dao/StoragePoolWorkDaoImpl.java index 052bae4fc78..aa3e0df04ea 100644 --- a/engine/schema/src/com/cloud/storage/dao/StoragePoolWorkDaoImpl.java +++ b/engine/schema/src/com/cloud/storage/dao/StoragePoolWorkDaoImpl.java @@ -36,7 +36,7 @@ import com.cloud.utils.exception.CloudRuntimeException; @Component @Local(value = { StoragePoolWorkDao.class }) -@DB(txn = false) +@DB() public class StoragePoolWorkDaoImpl extends GenericDaoBase implements StoragePoolWorkDao { protected final SearchBuilder PendingWorkForPrepareForMaintenanceSearch; diff --git a/engine/schema/src/com/cloud/storage/dao/VolumeDaoImpl.java b/engine/schema/src/com/cloud/storage/dao/VolumeDaoImpl.java index bf284105685..2b796649551 100755 --- a/engine/schema/src/com/cloud/storage/dao/VolumeDaoImpl.java +++ b/engine/schema/src/com/cloud/storage/dao/VolumeDaoImpl.java @@ -354,7 +354,7 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol } @Override - @DB(txn = false) + @DB() public Pair getCountAndTotalByPool(long poolId) { SearchCriteria sc = TotalSizeByPoolSearch.create(); sc.setParameters("poolId", poolId); @@ -506,7 +506,7 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol } @Override - @DB(txn = false) + @DB() public Pair getNonDestroyedCountAndTotalByPool(long poolId) { SearchCriteria sc = TotalSizeByPoolSearch.create(); sc.setParameters("poolId", poolId); diff --git a/engine/schema/src/com/cloud/upgrade/dao/VersionDaoImpl.java b/engine/schema/src/com/cloud/upgrade/dao/VersionDaoImpl.java index 2cdb5ec0736..8c92765e48b 100644 --- a/engine/schema/src/com/cloud/upgrade/dao/VersionDaoImpl.java +++ b/engine/schema/src/com/cloud/upgrade/dao/VersionDaoImpl.java @@ -40,7 +40,7 @@ import com.cloud.utils.exception.CloudRuntimeException; @Component @Local(value = VersionDao.class) -@DB(txn = false) +@DB() public class VersionDaoImpl extends GenericDaoBase implements VersionDao { private static final Logger s_logger = Logger.getLogger(VersionDaoImpl.class); diff --git a/engine/schema/src/com/cloud/vm/dao/UserVmCloneSettingDaoImpl.java b/engine/schema/src/com/cloud/vm/dao/UserVmCloneSettingDaoImpl.java index 174f28350d1..9d54b1ea3d8 100644 --- a/engine/schema/src/com/cloud/vm/dao/UserVmCloneSettingDaoImpl.java +++ b/engine/schema/src/com/cloud/vm/dao/UserVmCloneSettingDaoImpl.java @@ -35,7 +35,7 @@ import com.cloud.utils.db.DB; @Component @Local(value= { UserVmCloneSettingDao.class }) -@DB(txn = false) +@DB() public class UserVmCloneSettingDaoImpl extends GenericDaoBase implements UserVmCloneSettingDao { public static final Logger s_logger = Logger.getLogger(UserVmCloneSettingDaoImpl.class); diff --git 
a/engine/schema/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancerLbRuleMapDaoImpl.java b/engine/schema/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancerLbRuleMapDaoImpl.java index 2a6e72b6913..421343cbd5b 100644 --- a/engine/schema/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancerLbRuleMapDaoImpl.java +++ b/engine/schema/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancerLbRuleMapDaoImpl.java @@ -28,7 +28,7 @@ import java.util.List; @Component @Local(value={GlobalLoadBalancerLbRuleMapDao.class}) -@DB(txn = false) +@DB() public class GlobalLoadBalancerLbRuleMapDaoImpl extends GenericDaoBase implements GlobalLoadBalancerLbRuleMapDao { private final SearchBuilder listByGslbRuleId; diff --git a/engine/schema/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java b/engine/schema/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java index 54a107b5a92..9e2bb365518 100644 --- a/engine/schema/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java +++ b/engine/schema/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java @@ -44,7 +44,7 @@ import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; @Local(value = { PrimaryDataStoreDao.class }) -@DB(txn = false) +@DB() public class PrimaryDataStoreDaoImpl extends GenericDaoBase implements PrimaryDataStoreDao { protected final SearchBuilder AllFieldSearch; protected final SearchBuilder DcPodSearch; diff --git a/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java b/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java index 96d1f5ab785..fb6962a60f1 100644 --- a/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java +++ b/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java @@ -47,7 +47,7 @@ import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao; import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity; - +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -62,9 +62,9 @@ import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.configuration.Config; import com.cloud.host.Host; import com.cloud.host.dao.HostDao; -import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.server.ManagementService; import com.cloud.storage.DataStoreRole; +import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; import com.cloud.storage.VolumeVO; @@ -81,7 +81,7 @@ import com.cloud.utils.exception.CloudRuntimeException; @Component public class - AncientDataMotionStrategy implements DataMotionStrategy { +AncientDataMotionStrategy implements DataMotionStrategy { private static final Logger s_logger = Logger.getLogger(AncientDataMotionStrategy.class); @Inject EndPointSelector selector; @@ -138,7 +138,8 @@ public class DataTO destTO = destData.getTO(); DataStoreTO srcStoreTO = srcTO.getDataStore(); DataStoreTO destStoreTO = destTO.getDataStore(); - if (srcStoreTO instanceof NfsTO || srcStoreTO.getRole() == DataStoreRole.ImageCache) { + if (srcStoreTO instanceof NfsTO || srcStoreTO.getRole() == 
DataStoreRole.ImageCache || + (srcStoreTO instanceof PrimaryDataStoreTO && ((PrimaryDataStoreTO)srcStoreTO).getPoolType() == StoragePoolType.NetworkFilesystem)) { return false; } @@ -264,8 +265,14 @@ public class int _createVolumeFromSnapshotWait = NumbersUtil.parseInt(value, Integer.parseInt(Config.CreateVolumeFromSnapshotWait.getDefaultValue())); + EndPoint ep = null; + if (srcData.getDataStore().getRole() == DataStoreRole.Primary) { + ep = selector.select(volObj); + } else { + ep = selector.select(snapObj, volObj); + } + CopyCommand cmd = new CopyCommand(srcData.getTO(), volObj.getTO(), _createVolumeFromSnapshotWait, _mgmtServer.getExecuteInSequence()); - EndPoint ep = selector.select(snapObj, volObj); Answer answer = ep.sendMessage(cmd); return answer; @@ -433,11 +440,17 @@ public class srcData = cacheSnapshotChain(snapshot); } + EndPoint ep = null; + if (srcData.getDataStore().getRole() == DataStoreRole.Primary) { + ep = selector.select(destData); + } else { + ep = selector.select(srcData, destData); + } + CopyCommand cmd = new CopyCommand(srcData.getTO(), destData.getTO(), _createprivatetemplatefromsnapshotwait, _mgmtServer.getExecuteInSequence()); - EndPoint ep = selector.select(srcData, destData); Answer answer = ep.sendMessage(cmd); - - // clean up snapshot copied to staging + + // clean up snapshot copied to staging if (needCache && srcData != null) { cacheMgr.deleteCacheObject(srcData); } diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java index 855d8cbfe0f..d77658b4314 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java @@ -24,7 +24,6 @@ import java.util.concurrent.ExecutionException; import javax.inject.Inject; -import com.cloud.capacity.dao.CapacityDao; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.ImageStoreProvider; @@ -39,9 +38,11 @@ import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; import org.apache.cloudstack.storage.image.ImageStoreDriver; import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity; +import org.apache.cloudstack.storage.to.ImageStoreTO; import org.apache.log4j.Logger; import com.cloud.agent.api.to.DataStoreTO; +import com.cloud.capacity.dao.CapacityDao; import com.cloud.storage.DataStoreRole; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.dao.VMTemplateDao; @@ -181,7 +182,16 @@ public class ImageStoreImpl implements ImageStoreEntity { @Override public DataStoreTO getTO() { - return getDriver().getStoreTO(this); + DataStoreTO to = getDriver().getStoreTO(this); + if (to == null) { + ImageStoreTO primaryTO = new ImageStoreTO(); + primaryTO.setProviderName(getProviderName()); + primaryTO.setRole(getRole()); + primaryTO.setType(getProtocol()); + primaryTO.setUri(getUri()); + return primaryTO; + } + return to; } @Override diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/SnapshotTestWithFakeData.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/SnapshotTestWithFakeData.java index 2aaabdaf430..c73d1672d27 100644 --- 
a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/SnapshotTestWithFakeData.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/SnapshotTestWithFakeData.java @@ -18,7 +18,50 @@ */ package org.apache.cloudstack.storage.test; -import com.cloud.cluster.LockMasterListener; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; + +import javax.inject.Inject; + +import junit.framework.Assert; + +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotResult; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; +import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; +import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.volume.VolumeObject; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; + import com.cloud.dc.ClusterVO; import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenterVO; @@ -29,6 +72,7 @@ import com.cloud.dc.dao.HostPodDao; import com.cloud.hypervisor.Hypervisor; import com.cloud.org.Cluster; import com.cloud.org.Managed; +import com.cloud.server.LockMasterListener; import com.cloud.storage.CreateSnapshotPayload; import com.cloud.storage.DataStoreRole; import com.cloud.storage.ScopeType; @@ -47,53 +91,7 @@ import com.cloud.user.AccountManager; import com.cloud.user.User; import com.cloud.utils.DateUtil; import com.cloud.utils.component.ComponentContext; -import com.cloud.utils.db.DB; import com.cloud.utils.db.Merovingian2; -import junit.framework.Assert; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; -import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; 
-import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider; -import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; -import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotResult; -import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; -import org.apache.cloudstack.storage.datastore.PrimaryDataStore; -import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; -import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; -import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.cloudstack.storage.volume.VolumeObject; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mockito; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; - -import javax.inject.Inject; -import java.net.URI; -import java.net.URISyntaxException; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Executor; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -317,7 +315,7 @@ public class SnapshotTestWithFakeData { final VolumeInfo volumeInfo = createVolume(1L, store); Assert.assertTrue(volumeInfo.getState() == Volume.State.Ready); vol = volumeInfo; - // final SnapshotPolicyVO policyVO = createSnapshotPolicy(vol.getId()); + // final SnapshotPolicyVO policyVO = createSnapshotPolicy(vol.getId()); ExecutorService pool = Executors.newFixedThreadPool(2); @@ -325,7 +323,7 @@ public class SnapshotTestWithFakeData { List> future = new ArrayList>(); for(int i = 0; i < 12; i++) { final int cnt = i; - Future task = pool.submit(new Callable() { + Future task = pool.submit(new Callable() { @Override public Boolean call() throws Exception { boolean r = true; diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java index 3ead93f9c5f..c09adcad12d 100644 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java @@ -17,20 +17,24 @@ package org.apache.cloudstack.storage.snapshot; -import com.cloud.dc.dao.ClusterDao; -import com.cloud.storage.DataStoreRole; -import com.cloud.storage.Snapshot; -import com.cloud.storage.dao.SnapshotDao; -import com.cloud.storage.dao.VolumeDao; -import com.cloud.storage.snapshot.SnapshotManager; -import com.cloud.utils.exception.CloudRuntimeException; -import 
com.cloud.utils.fsm.NoTransitionException; -import com.cloud.vm.dao.UserVmDao; -import com.cloud.vm.snapshot.dao.VMSnapshotDao; +import java.util.concurrent.ExecutionException; + +import javax.inject.Inject; import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService; -import org.apache.cloudstack.engine.subsystem.api.storage.*; +import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionService; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotResult; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.framework.async.AsyncCallFuture; import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; @@ -41,13 +45,19 @@ import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; - import org.apache.log4j.Logger; import org.springframework.stereotype.Component; -import javax.inject.Inject; - -import java.util.concurrent.ExecutionException; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.Snapshot; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.snapshot.SnapshotManager; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.fsm.NoTransitionException; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.snapshot.dao.VMSnapshotDao; @Component public class SnapshotServiceImpl implements SnapshotService { @@ -383,7 +393,7 @@ public class SnapshotServiceImpl implements SnapshotService { } @Override - public boolean revertSnapshot(SnapshotInfo snapshot) { + public boolean revertSnapshot(Long snapshotId) { return false; } diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStrategyBase.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStrategyBase.java index 1b579227f84..6db8343214b 100644 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStrategyBase.java +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStrategyBase.java @@ -11,7 +11,7 @@ // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the +// KIND, either express or implied. 
See the License for the // specific language governing permissions and limitations // under the License. package org.apache.cloudstack.storage.snapshot; @@ -35,4 +35,9 @@ public abstract class SnapshotStrategyBase implements SnapshotStrategy { public SnapshotInfo backupSnapshot(SnapshotInfo snapshot) { return snapshotSvr.backupSnapshot(snapshot); } + + @Override + public boolean revertSnapshot(Long snapshotId) { + return snapshotSvr.revertSnapshot(snapshotId); + } } diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/XenserverSnapshotStrategy.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/XenserverSnapshotStrategy.java index 60d9407f2a0..aae4cde115a 100644 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/XenserverSnapshotStrategy.java +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/XenserverSnapshotStrategy.java @@ -11,24 +11,27 @@ // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the +// KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.apache.cloudstack.storage.snapshot; import javax.inject.Inject; -import com.cloud.storage.Volume; -import com.cloud.utils.db.DB; -import org.apache.cloudstack.engine.subsystem.api.storage.*; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotResult; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.storage.command.CreateObjectAnswer; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; import org.apache.cloudstack.storage.to.SnapshotObjectTO; - import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -36,9 +39,11 @@ import com.cloud.exception.InvalidParameterValueException; import com.cloud.storage.DataStoreRole; import com.cloud.storage.Snapshot; import com.cloud.storage.SnapshotVO; +import com.cloud.storage.Volume; import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.snapshot.SnapshotManager; import com.cloud.utils.NumbersUtil; +import com.cloud.utils.db.DB; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.fsm.NoTransitionException; @@ -236,6 +241,11 @@ public class XenserverSnapshotStrategy extends SnapshotStrategyBase { return true; } + @Override + public boolean revertSnapshot(Long snapshotId) { + throw new CloudRuntimeException("revert Snapshot is not supported"); + } + @Override @DB public SnapshotInfo takeSnapshot(SnapshotInfo snapshot) { diff --git 
a/engine/storage/src/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java b/engine/storage/src/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java index fdc12bf1cee..e7c66279e06 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java +++ b/engine/storage/src/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java @@ -27,9 +27,6 @@ import java.util.List; import javax.inject.Inject; -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; - import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; @@ -37,6 +34,8 @@ import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; import org.apache.cloudstack.engine.subsystem.api.storage.Scope; import org.apache.cloudstack.storage.LocalHostEndpoint; import org.apache.cloudstack.storage.RemoteHostEndPoint; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; import com.cloud.host.Host; import com.cloud.host.HostVO; @@ -250,6 +249,11 @@ public class DefaultEndPointSelector implements EndPointSelector { } } + @Override + public EndPoint select(Scope scope, Long storeId) { + return findEndPointInScope(scope, findOneHostOnPrimaryStorage, storeId); + } + @Override public List selectAll(DataStore store) { List endPoints = new ArrayList(); diff --git a/engine/storage/src/org/apache/cloudstack/storage/helper/HypervisorHelper.java b/engine/storage/src/org/apache/cloudstack/storage/helper/HypervisorHelper.java new file mode 100644 index 00000000000..40ced1d832c --- /dev/null +++ b/engine/storage/src/org/apache/cloudstack/storage/helper/HypervisorHelper.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.helper; + +import com.cloud.agent.api.to.DataTO; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.Scope; +import org.apache.cloudstack.storage.to.SnapshotObjectTO; + +public interface HypervisorHelper { + DataTO introduceObject(DataTO object, Scope scope, Long storeId); + boolean forgetObject(DataTO object, Scope scope, Long storeId); + SnapshotObjectTO takeSnapshot(SnapshotObjectTO snapshotObjectTO, Scope scope); + boolean revertSnapshot(SnapshotObjectTO snapshotObjectTO, Scope scope); +} diff --git a/engine/storage/src/org/apache/cloudstack/storage/helper/HypervisorHelperImpl.java b/engine/storage/src/org/apache/cloudstack/storage/helper/HypervisorHelperImpl.java new file mode 100644 index 00000000000..81e6f7c69c5 --- /dev/null +++ b/engine/storage/src/org/apache/cloudstack/storage/helper/HypervisorHelperImpl.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.storage.helper; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.to.DataTO; +import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; +import org.apache.cloudstack.engine.subsystem.api.storage.Scope; +import org.apache.cloudstack.storage.command.ForgetObjectCmd; +import org.apache.cloudstack.storage.command.IntroduceObjectAnswer; +import org.apache.cloudstack.storage.command.IntroduceObjectCmd; +import org.apache.cloudstack.storage.to.SnapshotObjectTO; +import org.apache.log4j.Logger; + +import javax.inject.Inject; + +public class HypervisorHelperImpl implements HypervisorHelper { + private static final Logger s_logger = Logger.getLogger(HypervisorHelperImpl.class); + @Inject + EndPointSelector selector; + + @Override + public DataTO introduceObject(DataTO object, Scope scope, Long storeId) { + EndPoint ep = selector.select(scope, storeId); + IntroduceObjectCmd cmd = new IntroduceObjectCmd(object); + Answer answer = ep.sendMessage(cmd); + if (answer == null || !answer.getResult()) { + String errMsg = answer == null ? 
null : answer.getDetails(); + throw new CloudRuntimeException("Failed to introduce object, due to " + errMsg); + } + IntroduceObjectAnswer introduceObjectAnswer = (IntroduceObjectAnswer)answer; + return introduceObjectAnswer.getDataTO(); + } + + @Override + public boolean forgetObject(DataTO object, Scope scope, Long storeId) { + EndPoint ep = selector.select(scope, storeId); + ForgetObjectCmd cmd = new ForgetObjectCmd(object); + Answer answer = ep.sendMessage(cmd); + if (answer == null || !answer.getResult()) { + String errMsg = answer == null ? null : answer.getDetails(); + if (errMsg != null) { + s_logger.debug("Failed to forget object: " + errMsg); + } + return false; + } + return true; + } + + @Override + public SnapshotObjectTO takeSnapshot(SnapshotObjectTO snapshotObjectTO, Scope scope) { + return null; // Not implemented in this generic helper; a hypervisor-specific helper must provide snapshot creation. + } + + @Override + public boolean revertSnapshot(SnapshotObjectTO snapshotObjectTO, Scope scope) { + return false; // Not implemented in this generic helper; revert is reported as unsupported. + } +} diff --git a/framework/db/src/com/cloud/utils/db/DB.java b/framework/db/src/com/cloud/utils/db/DB.java index f83a7ea7eb4..b67f93e814a 100644 --- a/framework/db/src/com/cloud/utils/db/DB.java +++ b/framework/db/src/com/cloud/utils/db/DB.java @@ -36,24 +36,8 @@ import java.lang.annotation.Target; * _dao.acquireInLockTable(id); * ... * _dao.releaseFromLockTable(id); - * - * 3. Annotate methods that are inside a DAO but doesn't use - * the Transaction class. Generally, these are methods - * that are utility methods for setting up searches. In - * this case use @DB(txn=false) to annotate the method. - * While this is not required, it helps when you're debugging - * the code and it saves on method calls during runtime. - * */ @Target({TYPE, METHOD}) @Retention(RUNTIME) public @interface DB { - /** - * (Optional) Specifies that the method - * does not use transaction. This is useful for - * utility methods within DAO classes which are - * automatically marked with @DB. By marking txn=false, - * the method is not surrounded with transaction code.
- */ - boolean txn() default true; } diff --git a/framework/db/src/com/cloud/utils/db/GenericDaoBase.java b/framework/db/src/com/cloud/utils/db/GenericDaoBase.java index ad8c770f904..b4c02cf8e1b 100755 --- a/framework/db/src/com/cloud/utils/db/GenericDaoBase.java +++ b/framework/db/src/com/cloud/utils/db/GenericDaoBase.java @@ -168,7 +168,7 @@ public abstract class GenericDaoBase extends Compone } @Override - @SuppressWarnings("unchecked") @DB(txn=false) + @SuppressWarnings("unchecked") @DB() public GenericSearchBuilder createSearchBuilder(Class resultType) { return new GenericSearchBuilder(_entityBeanType, resultType); } @@ -282,7 +282,7 @@ public abstract class GenericDaoBase extends Compone setRunLevel(ComponentLifecycle.RUN_LEVEL_SYSTEM); } - @Override @DB(txn=false) + @Override @DB() @SuppressWarnings("unchecked") public T createForUpdate(final ID id) { final T entity = (T)_factory.newInstance(new Callback[] {NoOp.INSTANCE, new UpdateBuilder(this)}); @@ -296,12 +296,12 @@ public abstract class GenericDaoBase extends Compone return entity; } - @Override @DB(txn=false) + @Override @DB() public T createForUpdate() { return createForUpdate(null); } - @Override @DB(txn=false) + @Override @DB() public K getNextInSequence(final Class clazz, final String name) { final TableGenerator tg = _tgs.get(name); assert (tg != null) : "Couldn't find Table generator using " + name; @@ -309,7 +309,7 @@ public abstract class GenericDaoBase extends Compone return s_seqFetcher.getNextSequence(clazz, tg); } - @Override @DB(txn=false) + @Override @DB() public K getRandomlyIncreasingNextInSequence(final Class clazz, final String name) { final TableGenerator tg = _tgs.get(name); assert (tg != null) : "Couldn't find Table generator using " + name; @@ -317,19 +317,19 @@ public abstract class GenericDaoBase extends Compone return s_seqFetcher.getRandomNextSequence(clazz, tg); } - @Override @DB(txn=false) + @Override @DB() public List lockRows(final SearchCriteria sc, final Filter filter, final boolean exclusive) { return search(sc, filter, exclusive, false); } - @Override @DB(txn=false) + @Override @DB() public T lockOneRandomRow(final SearchCriteria sc, final boolean exclusive) { final Filter filter = new Filter(1); final List beans = search(sc, filter, exclusive, true); return beans.isEmpty() ? 
null : beans.get(0); } - @DB(txn=false) + @DB() protected List search(SearchCriteria sc, final Filter filter, final Boolean lock, final boolean cache) { if (_removed != null) { if (sc == null) { @@ -340,7 +340,7 @@ public abstract class GenericDaoBase extends Compone return searchIncludingRemoved(sc, filter, lock, cache); } - @DB(txn=false) + @DB() protected List search(SearchCriteria sc, final Filter filter, final Boolean lock, final boolean cache, final boolean enable_query_cache) { if (_removed != null) { if (sc == null) { @@ -499,7 +499,7 @@ public abstract class GenericDaoBase extends Compone } } - @Override @DB(txn=false) + @Override @DB() public List customSearch(SearchCriteria sc, final Filter filter) { if (_removed != null) { sc.addAnd(_removed.second().field.getName(), SearchCriteria.Op.NULL); @@ -508,7 +508,7 @@ public abstract class GenericDaoBase extends Compone return customSearchIncludingRemoved(sc, filter); } - @DB(txn=false) + @DB() protected void setField(Object entity, Field field, ResultSet rs, int index) throws SQLException { try { final Class type = field.getType(); @@ -652,7 +652,7 @@ public abstract class GenericDaoBase extends Compone } } - @DB(txn=false) @SuppressWarnings("unchecked") + @DB() @SuppressWarnings("unchecked") protected M getObject(Class type, ResultSet rs, int index) throws SQLException { if (type == String.class) { byte[] bytes = rs.getBytes(index); @@ -744,7 +744,7 @@ public abstract class GenericDaoBase extends Compone } } - @DB(txn=false) + @DB() protected int addJoinAttributes(int count, PreparedStatement pstmt, Collection>> joins) throws SQLException { for (JoinBuilder> join : joins) { for (final Pair value : join.getT().getValues()) { @@ -832,12 +832,12 @@ public abstract class GenericDaoBase extends Compone } } - @DB(txn=false) + @DB() protected Attribute findAttributeByFieldName(String name) { return _allAttributes.get(name); } - @DB(txn=false) + @DB() protected String buildSelectByIdSql(final StringBuilder sql) { if (_idField == null) { return null; @@ -857,13 +857,13 @@ public abstract class GenericDaoBase extends Compone return sql.toString(); } - @DB(txn=false) + @DB() @Override public Class getEntityBeanType() { return _entityBeanType; } - @DB(txn=false) + @DB() protected T findOneIncludingRemovedBy(final SearchCriteria sc) { Filter filter = new Filter(1); List results = searchIncludingRemoved(sc, filter, null, false); @@ -872,7 +872,7 @@ public abstract class GenericDaoBase extends Compone } @Override - @DB(txn=false) + @DB() public T findOneBy(final SearchCriteria sc) { if (_removed != null) { sc.addAnd(_removed.second().field.getName(), SearchCriteria.Op.NULL); @@ -880,7 +880,7 @@ public abstract class GenericDaoBase extends Compone return findOneIncludingRemovedBy(sc); } - @DB(txn=false) + @DB() protected List listBy(final SearchCriteria sc, final Filter filter) { if (_removed != null) { sc.addAnd(_removed.second().field.getName(), SearchCriteria.Op.NULL); @@ -888,7 +888,7 @@ public abstract class GenericDaoBase extends Compone return listIncludingRemovedBy(sc, filter); } - @DB(txn=false) + @DB() protected List listBy(final SearchCriteria sc, final Filter filter, final boolean enable_query_cache) { if (_removed != null) { sc.addAnd(_removed.second().field.getName(), SearchCriteria.Op.NULL); @@ -896,27 +896,27 @@ public abstract class GenericDaoBase extends Compone return listIncludingRemovedBy(sc, filter, enable_query_cache); } - @DB(txn=false) + @DB() protected List listBy(final SearchCriteria sc) { return listBy(sc, null); } - 
@DB(txn=false) + @DB() protected List listIncludingRemovedBy(final SearchCriteria sc, final Filter filter, final boolean enable_query_cache) { return searchIncludingRemoved(sc, filter, null, false, enable_query_cache); } - @DB(txn=false) + @DB() protected List listIncludingRemovedBy(final SearchCriteria sc, final Filter filter) { return searchIncludingRemoved(sc, filter, null, false); } - @DB(txn=false) + @DB() protected List listIncludingRemovedBy(final SearchCriteria sc) { return listIncludingRemovedBy(sc, null); } - @Override @DB(txn=false) + @Override @DB() @SuppressWarnings("unchecked") public T findById(final ID id) { if (_cache != null) { @@ -927,26 +927,26 @@ public abstract class GenericDaoBase extends Compone } } - @Override @DB(txn=false) + @Override @DB() public T findByUuid(final String uuid) { SearchCriteria sc = createSearchCriteria(); sc.addAnd("uuid", SearchCriteria.Op.EQ, uuid); return findOneBy(sc); } - @Override @DB(txn=false) + @Override @DB() public T findByUuidIncludingRemoved(final String uuid) { SearchCriteria sc = createSearchCriteria(); sc.addAnd("uuid", SearchCriteria.Op.EQ, uuid); return findOneIncludingRemovedBy(sc); } - @Override @DB(txn=false) + @Override @DB() public T findByIdIncludingRemoved(ID id) { return findById(id, true, null); } - @Override @DB(txn=false) + @Override @DB() public T findById(final ID id, boolean fresh) { if(!fresh) { return findById(id); @@ -958,7 +958,7 @@ public abstract class GenericDaoBase extends Compone return lockRow(id, null); } - @Override @DB(txn=false) + @Override @DB() public T lockRow(ID id, Boolean lock) { return findById(id, false, lock); } @@ -987,7 +987,7 @@ public abstract class GenericDaoBase extends Compone } } - @Override @DB(txn=false) + @Override @DB() public T acquireInLockTable(ID id) { return acquireInLockTable(id, _timeoutSeconds); } @@ -1018,7 +1018,7 @@ public abstract class GenericDaoBase extends Compone return txn.release(_table + id); } - @Override @DB(txn=false) + @Override @DB() public boolean lockInLockTable(final String id) { return lockInLockTable(id, _timeoutSeconds); } @@ -1035,12 +1035,12 @@ public abstract class GenericDaoBase extends Compone return txn.release(_table + id); } - @Override @DB(txn=false) + @Override @DB() public List listAllIncludingRemoved() { return listAllIncludingRemoved(null); } - @DB(txn=false) + @DB() protected List addGroupBy(final StringBuilder sql, SearchCriteria sc) { Pair, List> groupBys = sc.getGroupBy(); if (groupBys != null) { @@ -1051,7 +1051,7 @@ public abstract class GenericDaoBase extends Compone } } - @DB(txn=false) + @DB() protected void addFilter(final StringBuilder sql, final Filter filter) { if (filter != null) { if (filter.getOrderBy() != null) { @@ -1067,7 +1067,7 @@ public abstract class GenericDaoBase extends Compone } } - @Override @DB(txn=false) + @Override @DB() public List listAllIncludingRemoved(final Filter filter) { final StringBuilder sql = createPartialSelectSql(null, false); addFilter(sql, filter); @@ -1098,12 +1098,12 @@ public abstract class GenericDaoBase extends Compone } } - @Override @DB(txn=false) + @Override @DB() public List listAll() { return listAll(null); } - @Override @DB(txn=false) + @Override @DB() public List listAll(final Filter filter) { if (_removed == null) { return listAllIncludingRemoved(filter); @@ -1174,7 +1174,7 @@ public abstract class GenericDaoBase extends Compone } } - @DB(txn=false) + @DB() protected StringBuilder createPartialSelectSql(SearchCriteria sc, final boolean whereClause, final boolean 
enable_query_cache) { StringBuilder sql = new StringBuilder(enable_query_cache ? _partialQueryCacheSelectSql.first() : _partialSelectSql.first()); if (sc != null && !sc.isSelectAll()) { @@ -1189,7 +1189,7 @@ public abstract class GenericDaoBase extends Compone return sql; } - @DB(txn=false) + @DB() protected StringBuilder createPartialSelectSql(SearchCriteria sc, final boolean whereClause) { StringBuilder sql = new StringBuilder(_partialSelectSql.first()); if (sc != null && !sc.isSelectAll()) { @@ -1205,7 +1205,7 @@ public abstract class GenericDaoBase extends Compone } - @DB(txn = false) + @DB() protected void addJoins(StringBuilder str, Collection>> joins) { int fromIndex = str.lastIndexOf("WHERE"); if (fromIndex == -1) { @@ -1238,24 +1238,24 @@ public abstract class GenericDaoBase extends Compone } } - @Override @DB(txn=false) + @Override @DB() public List search(final SearchCriteria sc, final Filter filter) { return search(sc, filter, null, false); } - @Override @DB(txn=false) + @Override @DB() public Pair, Integer> searchAndCount(final SearchCriteria sc, final Filter filter) { List objects = search(sc, filter, null, false); Integer count = getCount(sc); return new Pair, Integer>(objects, count); } - @Override @DB(txn=false) + @Override @DB() public List search(final SearchCriteria sc, final Filter filter, final boolean enable_query_cache) { return search(sc, filter, null, false, enable_query_cache); } - @Override @DB(txn=false) + @Override @DB() public boolean update(ID id, T entity) { assert Enhancer.isEnhanced(entity.getClass()) : "Entity is not generated by this dao"; @@ -1264,14 +1264,14 @@ public abstract class GenericDaoBase extends Compone return result; } - @DB(txn=false) + @DB() public int update(final T entity, final SearchCriteria sc, Integer rows) { final UpdateBuilder ub = getUpdateBuilder(entity); return update(ub, sc, rows); } @Override - @DB(txn=false) + @DB() public int update(final T entity, final SearchCriteria sc) { final UpdateBuilder ub = getUpdateBuilder(entity); return update(ub, sc, null); @@ -1390,7 +1390,7 @@ public abstract class GenericDaoBase extends Compone txn.commit(); } - @DB(txn=false) + @DB() protected Object generateValue(final Attribute attr) { if (attr.is(Attribute.Flag.Created) || attr.is(Attribute.Flag.Removed)) { return new Date(); @@ -1414,7 +1414,7 @@ public abstract class GenericDaoBase extends Compone } } - @DB(txn=false) + @DB() protected void prepareAttribute(final int j, final PreparedStatement pstmt, final Attribute attr, Object value) throws SQLException { if (attr.is(Attribute.Flag.DaoGenerated) && value == null) { value = generateValue(attr); @@ -1519,7 +1519,7 @@ public abstract class GenericDaoBase extends Compone } } - @DB(txn=false) + @DB() protected int prepareAttributes(final PreparedStatement pstmt, final Object entity, final Attribute[] attrs, final int index) throws SQLException { int j = 0; for (int i = 0; i < attrs.length; i++) { @@ -1536,7 +1536,7 @@ public abstract class GenericDaoBase extends Compone return j; } - @SuppressWarnings("unchecked") @DB(txn=false) + @SuppressWarnings("unchecked") @DB() protected T toEntityBean(final ResultSet result, final boolean cache) throws SQLException { final T entity = (T)_factory.newInstance(new Callback[] {NoOp.INSTANCE, new UpdateBuilder(this)}); @@ -1553,7 +1553,7 @@ public abstract class GenericDaoBase extends Compone return entity; } - @DB(txn=false) + @DB() protected T toVO(ResultSet result, boolean cache) throws SQLException { T entity; try { @@ -1575,7 +1575,7 @@ public 
abstract class GenericDaoBase extends Compone return entity; } - @DB(txn=false) + @DB() protected void toEntityBean(final ResultSet result, final T entity) throws SQLException { ResultSetMetaData meta = result.getMetaData(); for (int index = 1, max = meta.getColumnCount(); index <= max; index++) { @@ -1586,7 +1586,7 @@ public abstract class GenericDaoBase extends Compone } } - @DB(txn = true) + @DB() @SuppressWarnings("unchecked") protected void loadCollection(T entity, Attribute attr) { EcInfo ec = (EcInfo)attr.attache; @@ -1688,7 +1688,7 @@ public abstract class GenericDaoBase extends Compone } } - @DB(txn=false) + @DB() protected void setField(final Object entity, final ResultSet rs, ResultSetMetaData meta, final int index) throws SQLException { Attribute attr = _allColumns.get(new Pair(meta.getTableName(index), meta.getColumnName(index))); if ( attr == null ){ @@ -1745,7 +1745,7 @@ public abstract class GenericDaoBase extends Compone } protected Cache _cache; - @DB(txn=false) + @DB() protected void createCache(final Map params) { final String value = (String)params.get("cache.size"); @@ -1762,7 +1762,7 @@ public abstract class GenericDaoBase extends Compone } } - @Override @DB(txn=false) + @Override @DB() public boolean configure(final String name, final Map params) throws ConfigurationException { _name = name; @@ -1778,19 +1778,19 @@ public abstract class GenericDaoBase extends Compone return true; } - @DB(txn=false) + @DB() public static UpdateBuilder getUpdateBuilder(final T entityObject) { final Factory factory = (Factory)entityObject; assert(factory != null); return (UpdateBuilder)factory.getCallback(1); } - @Override @DB(txn=false) + @Override @DB() public SearchBuilder createSearchBuilder() { return new SearchBuilder(_entityBeanType); } - @Override @DB(txn=false) + @Override @DB() public SearchCriteria createSearchCriteria() { SearchBuilder builder = createSearchBuilder(); return builder.create(); @@ -1859,7 +1859,7 @@ public abstract class GenericDaoBase extends Compone } } - @DB(txn=false) + @DB() protected StringBuilder createCountSelect(SearchCriteria sc, final boolean whereClause) { StringBuilder sql = new StringBuilder(_count); diff --git a/maven-standard/pom.xml b/maven-standard/pom.xml index 0960e2f65cc..e4a81d846de 100644 --- a/maven-standard/pom.xml +++ b/maven-standard/pom.xml @@ -19,7 +19,7 @@ 4.0.0 cloud-maven-standard - Apache CloudStack Maven Contentions Parent + Apache CloudStack Maven Conventions Parent Historically ACS was built with a custom build system mixing ant and wscript. When the conversion to maven was done the existing directory structure in git was kept. So the src, testing, and resources folders in ACS don't follow the standard maven conventions. 
This parent pom forces the folders back to the standard conventions pom diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/database/BaremetalDhcpDaoImpl.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/database/BaremetalDhcpDaoImpl.java index b21010b8877..3a6aef7cb96 100644 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/database/BaremetalDhcpDaoImpl.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/database/BaremetalDhcpDaoImpl.java @@ -18,16 +18,11 @@ // Automatically generated by addcopyright.py at 01/29/2013 package com.cloud.baremetal.database; -import java.util.List; -import java.util.Map; - import javax.ejb.Local; -import javax.naming.ConfigurationException; import org.springframework.stereotype.Component; import com.cloud.utils.db.DB; -import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.SearchBuilder; @@ -36,7 +31,7 @@ import com.cloud.utils.db.GenericQueryBuilder; @Component @Local(value=BaremetalDhcpDao.class) -@DB(txn=false) +@DB() public class BaremetalDhcpDaoImpl extends GenericDaoBase implements BaremetalDhcpDao { public BaremetalDhcpDaoImpl() { diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/database/BaremetalPxeDaoImpl.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/database/BaremetalPxeDaoImpl.java index 6afbc1b36dc..bede34760fe 100644 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/database/BaremetalPxeDaoImpl.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/database/BaremetalPxeDaoImpl.java @@ -18,16 +18,11 @@ // Automatically generated by addcopyright.py at 01/29/2013 package com.cloud.baremetal.database; -import java.util.List; -import java.util.Map; - import javax.ejb.Local; -import javax.naming.ConfigurationException; import org.springframework.stereotype.Component; import com.cloud.utils.db.DB; -import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.SearchBuilder; @@ -36,6 +31,6 @@ import com.cloud.utils.db.GenericQueryBuilder; @Component @Local(value = {BaremetalPxeDao.class}) -@DB(txn = false) +@DB() public class BaremetalPxeDaoImpl extends GenericDaoBase implements BaremetalPxeDao { } diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index 82fd2ce99cc..b1c8ec7622a 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -18,10 +18,10 @@ */ package com.cloud.hypervisor.kvm.storage; -import java.io.File; -import java.io.FileOutputStream; -import java.io.FileNotFoundException; import java.io.BufferedOutputStream; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; import java.io.IOException; import java.net.URISyntaxException; import java.text.DateFormat; @@ -35,11 +35,6 @@ import java.util.UUID; import javax.naming.ConfigurationException; -import com.cloud.agent.api.storage.CopyVolumeAnswer; -import com.cloud.agent.api.to.DataObjectType; -import com.cloud.agent.api.to.S3TO; -import com.cloud.agent.api.to.StorageFilerTO; -import com.cloud.utils.S3Utils; import org.apache.cloudstack.storage.command.AttachAnswer; import 
org.apache.cloudstack.storage.command.AttachCommand; import org.apache.cloudstack.storage.command.CopyCmdAnswer; @@ -49,6 +44,8 @@ import org.apache.cloudstack.storage.command.CreateObjectCommand; import org.apache.cloudstack.storage.command.DeleteCommand; import org.apache.cloudstack.storage.command.DettachAnswer; import org.apache.cloudstack.storage.command.DettachCommand; +import org.apache.cloudstack.storage.command.ForgetObjectCmd; +import org.apache.cloudstack.storage.command.IntroduceObjectCmd; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.cloudstack.storage.to.SnapshotObjectTO; import org.apache.cloudstack.storage.to.TemplateObjectTO; @@ -57,20 +54,28 @@ import org.apache.cloudstack.utils.qemu.QemuImg; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; import org.apache.cloudstack.utils.qemu.QemuImgException; import org.apache.cloudstack.utils.qemu.QemuImgFile; -import org.apache.log4j.Logger; import org.apache.commons.io.FileUtils; +import org.apache.log4j.Logger; import org.libvirt.Connect; import org.libvirt.Domain; import org.libvirt.DomainInfo; import org.libvirt.DomainSnapshot; import org.libvirt.LibvirtException; +import com.ceph.rados.IoCTX; +import com.ceph.rados.Rados; +import com.ceph.rados.RadosException; +import com.ceph.rbd.Rbd; +import com.ceph.rbd.RbdException; +import com.ceph.rbd.RbdImage; import com.cloud.agent.api.Answer; import com.cloud.agent.api.storage.PrimaryStorageDownloadAnswer; +import com.cloud.agent.api.to.DataObjectType; import com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.DataTO; import com.cloud.agent.api.to.DiskTO; import com.cloud.agent.api.to.NfsTO; +import com.cloud.agent.api.to.S3TO; import com.cloud.exception.InternalErrorException; import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; import com.cloud.hypervisor.kvm.resource.LibvirtConnection; @@ -87,16 +92,10 @@ import com.cloud.storage.template.Processor.FormatInfo; import com.cloud.storage.template.QCOW2Processor; import com.cloud.storage.template.TemplateLocation; import com.cloud.utils.NumbersUtil; +import com.cloud.utils.S3Utils; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.Script; -import com.ceph.rados.Rados; -import com.ceph.rados.RadosException; -import com.ceph.rados.IoCTX; -import com.ceph.rbd.Rbd; -import com.ceph.rbd.RbdImage; -import com.ceph.rbd.RbdException; - import static com.cloud.utils.S3Utils.putFile; public class KVMStorageProcessor implements StorageProcessor { @@ -197,7 +196,7 @@ public class KVMStorageProcessor implements StorageProcessor { primaryPool, cmd.getWaitInMillSeconds()); - DataTO data = null; + DataTO data = null; /** * Force the ImageFormat for RBD templates to RAW * @@ -370,7 +369,7 @@ public class KVMStorageProcessor implements StorageProcessor { String srcVolumeName = srcVolumePath.substring(index + 1); secondaryStoragePool = storagePoolMgr.getStoragePoolByURI( secondaryStorageUrl + File.separator + volumeDir - ); + ); if (!srcVolumeName.endsWith(".qcow2") && srcFormat == ImageFormat.QCOW2) { srcVolumeName = srcVolumeName + ".qcow2"; } @@ -1207,4 +1206,14 @@ public class KVMStorageProcessor implements StorageProcessor { public Answer deleteSnapshot(DeleteCommand cmd) { return new Answer(cmd); } + + @Override + public Answer introduceObject(IntroduceObjectCmd cmd) { + return new Answer(cmd, false, "not implemented yet"); + } + + @Override + public Answer forgetObject(ForgetObjectCmd cmd) { + return new Answer(cmd, false, "not implemented yet"); + } }
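A note on the new IntroduceObjectCmd / ForgetObjectCmd callbacks added to the StorageProcessor implementations in this patch: KVMStorageProcessor (above) answers them with an explicit failed Answer, while SimulatorStorageProcessor (below) returns null from its auto-generated stubs. A minimal caller-side guard is sketched here for illustration only; it assumes nothing beyond the Answer(Command, boolean, String) constructor already used in this patch, and the helper class and method names are hypothetical, not part of the change.

    import com.cloud.agent.api.Answer;
    import com.cloud.agent.api.Command;

    // Hypothetical helper: normalize a null Answer from a not-yet-implemented
    // StorageProcessor callback into an explicit failure so callers never NPE.
    final class StorageProcessorAnswers {
        private StorageProcessorAnswers() {
        }

        static Answer orNotImplemented(Answer answer, Command cmd) {
            if (answer != null) {
                return answer;
            }
            // Mirror the explicit-failure style used by KVMStorageProcessor above.
            return new Answer(cmd, false, "not implemented yet");
        }
    }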
diff --git a/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorStorageProcessor.java b/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorStorageProcessor.java index c7768aa5b69..1c992722058 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorStorageProcessor.java +++ b/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorStorageProcessor.java @@ -19,14 +19,9 @@ package com.cloud.resource; -import com.cloud.agent.api.Answer; -import com.cloud.agent.api.to.DataStoreTO; -import com.cloud.agent.api.to.DataTO; -import com.cloud.agent.api.to.DiskTO; -import com.cloud.agent.api.to.NfsTO; -import com.cloud.agent.manager.SimulatorManager; -import com.cloud.storage.Storage; -import com.cloud.storage.resource.StorageProcessor; +import java.io.File; +import java.util.UUID; + import org.apache.cloudstack.storage.command.AttachAnswer; import org.apache.cloudstack.storage.command.AttachCommand; import org.apache.cloudstack.storage.command.CopyCmdAnswer; @@ -36,13 +31,21 @@ import org.apache.cloudstack.storage.command.CreateObjectCommand; import org.apache.cloudstack.storage.command.DeleteCommand; import org.apache.cloudstack.storage.command.DettachAnswer; import org.apache.cloudstack.storage.command.DettachCommand; +import org.apache.cloudstack.storage.command.ForgetObjectCmd; +import org.apache.cloudstack.storage.command.IntroduceObjectCmd; import org.apache.cloudstack.storage.to.SnapshotObjectTO; import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.log4j.Logger; -import java.io.File; -import java.util.UUID; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.to.DataStoreTO; +import com.cloud.agent.api.to.DataTO; +import com.cloud.agent.api.to.DiskTO; +import com.cloud.agent.api.to.NfsTO; +import com.cloud.agent.manager.SimulatorManager; +import com.cloud.storage.Storage; +import com.cloud.storage.resource.StorageProcessor; public class SimulatorStorageProcessor implements StorageProcessor { @@ -214,4 +217,16 @@ public class SimulatorStorageProcessor implements StorageProcessor { public Answer deleteSnapshot(DeleteCommand cmd) { return new Answer(cmd); } + + @Override + public Answer introduceObject(IntroduceObjectCmd cmd) { + // TODO Auto-generated method stub + return null; + } + + @Override + public Answer forgetObject(ForgetObjectCmd cmd) { + // TODO Auto-generated method stub + return null; + } } diff --git a/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsBladeDaoImpl.java b/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsBladeDaoImpl.java index 5cb9b022bf4..5dc6f79bf7d 100644 --- a/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsBladeDaoImpl.java +++ b/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsBladeDaoImpl.java @@ -19,12 +19,10 @@ package com.cloud.ucs.database; import javax.ejb.Local; -import org.springframework.stereotype.Component; - import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; @Local(value = { UcsBladeDao.class }) -@DB(txn = false) +@DB() public class UcsBladeDaoImpl extends GenericDaoBase implements UcsBladeDao { } diff --git a/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsManagerDaoImpl.java b/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsManagerDaoImpl.java index 93c088566a1..9500886875f 100644 --- a/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsManagerDaoImpl.java +++ 
b/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsManagerDaoImpl.java @@ -19,12 +19,10 @@ package com.cloud.ucs.database; import javax.ejb.Local; -import org.springframework.stereotype.Component; - import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; @Local(value = { UcsManagerDao.class }) -@DB(txn = false) +@DB() public class UcsManagerDaoImpl extends GenericDaoBase implements UcsManagerDao { } diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/LegacyZoneDaoImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/LegacyZoneDaoImpl.java index 7d2d1285c13..20d68b0b8d1 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/LegacyZoneDaoImpl.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/LegacyZoneDaoImpl.java @@ -32,7 +32,7 @@ import com.cloud.utils.db.SearchCriteria.Op; @Component - @Local(value=LegacyZoneDao.class) @DB(txn=false) + @Local(value=LegacyZoneDao.class) @DB public class LegacyZoneDaoImpl extends GenericDaoBase implements LegacyZoneDao { protected static final Logger s_logger = Logger.getLogger(LegacyZoneDaoImpl.class); diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/VmwareDatacenterDaoImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/VmwareDatacenterDaoImpl.java index 9f5796a073a..6dbbbed5d89 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/VmwareDatacenterDaoImpl.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/VmwareDatacenterDaoImpl.java @@ -32,7 +32,7 @@ import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; @Component -@Local(value=VmwareDatacenterDao.class) @DB(txn=false) +@Local(value=VmwareDatacenterDao.class) @DB public class VmwareDatacenterDaoImpl extends GenericDaoBase implements VmwareDatacenterDao { protected static final Logger s_logger = Logger.getLogger(VmwareDatacenterDaoImpl.class); diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareHostService.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareHostService.java index 2b44071a87c..d0147d194a6 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareHostService.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareHostService.java @@ -17,6 +17,7 @@ package com.cloud.hypervisor.vmware.manager; import com.cloud.agent.api.Command; +import com.cloud.hypervisor.vmware.mo.DatastoreMO; import com.cloud.hypervisor.vmware.mo.VmwareHypervisorHost; import com.cloud.hypervisor.vmware.util.VmwareContext; import com.vmware.vim25.ManagedObjectReference; @@ -28,7 +29,8 @@ public interface VmwareHostService { String getWorkerName(VmwareContext context, Command cmd, int workerSequence); - ManagedObjectReference handleDatastoreAndVmdkAttach(Command cmd, String iqn, String storageHost, int storagePort, - String initiatorUsername, String initiatorPassword, String targetUsername, String targetPassword) throws Exception; + ManagedObjectReference getVmfsDatastore(VmwareHypervisorHost hyperHost, String datastoreName, String storageIpAddress, int storagePortNumber, + String iqn, String initiatorChapName, String initiatorChapSecret, String mutualChapName, String mutualChapSecret) throws Exception; + void createVmdk(Command cmd, DatastoreMO dsMo, String volumeDatastorePath, Long volumeSize) throws Exception; void handleDatastoreAndVmdkDetach(String iqn, String 
storageHost, int storagePort) throws Exception; } diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java index 83dcc58364b..a35a9661cda 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -2629,6 +2629,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa DatacenterMO dcMo = new DatacenterMO(hyperHost.getContext(), hyperHost.getHyperHostDatacenter()); VirtualMachineDiskInfoBuilder diskInfoBuilder = null; VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(vmInternalCSName); + boolean hasSnapshot = false; if (vmMo != null) { s_logger.info("VM " + vmInternalCSName + " already exists, tear down devices for reconfiguration"); if (getVmState(vmMo) != State.Stopped) @@ -2636,7 +2637,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa // retrieve disk information before we tear down diskInfoBuilder = vmMo.getDiskInfoBuilder(); - vmMo.tearDownDevices(new Class[] { VirtualDisk.class, VirtualEthernetCard.class }); + hasSnapshot = vmMo.hasSnapshot(); + if(!hasSnapshot) + vmMo.tearDownDevices(new Class[] { VirtualDisk.class, VirtualEthernetCard.class }); + else + vmMo.tearDownDevices(new Class[] { VirtualEthernetCard.class }); vmMo.ensureScsiDeviceController(); } else { ManagedObjectReference morDc = hyperHost.getHyperHostDatacenter(); @@ -2654,7 +2659,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa vmMo.safePowerOff(_shutdown_waitMs); diskInfoBuilder = vmMo.getDiskInfoBuilder(); - vmMo.tearDownDevices(new Class[] { VirtualDisk.class, VirtualEthernetCard.class }); + hasSnapshot = vmMo.hasSnapshot(); + if(!hasSnapshot) + vmMo.tearDownDevices(new Class[] { VirtualDisk.class, VirtualEthernetCard.class }); + else + vmMo.tearDownDevices(new Class[] { VirtualEthernetCard.class }); vmMo.ensureScsiDeviceController(); } else { int ramMb = (int) (vmSpec.getMinRam() / (1024 * 1024)); @@ -2810,37 +2819,45 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa // DiskTO[] sortedDisks = sortVolumesByDeviceId(disks); for (DiskTO vol : sortedDisks) { - deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec(); - if (vol.getType() == Volume.Type.ISO) continue; - + VirtualMachineDiskInfo matchingExistingDisk = getMatchingExistingDisk(diskInfoBuilder, vol); controllerKey = getDiskController(matchingExistingDisk, vol, vmSpec, ideControllerKey, scsiControllerKey); - VolumeObjectTO volumeTO = (VolumeObjectTO)vol.getData(); - PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)volumeTO.getDataStore(); - Pair volumeDsDetails = dataStoresDetails.get(primaryStore.getUuid()); - assert (volumeDsDetails != null); - VirtualDevice device; - - String[] diskChain = syncDiskChain(dcMo, vmMo, vmSpec, - vol, matchingExistingDisk, - dataStoresDetails); - if(controllerKey == scsiControllerKey && VmwareHelper.isReservedScsiDeviceNumber(scsiUnitNumber)) - scsiUnitNumber++; - device = VmwareHelper.prepareDiskDevice(vmMo, null, controllerKey, - diskChain, - volumeDsDetails.first(), - (controllerKey == ideControllerKey) ? 
ideUnitNumber++ : scsiUnitNumber++, i + 1); - - deviceConfigSpecArray[i].setDevice(device); - deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD); - - if(s_logger.isDebugEnabled()) - s_logger.debug("Prepare volume at new device " + _gson.toJson(device)); - - i++; + if(!hasSnapshot) { + deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec(); + + VolumeObjectTO volumeTO = (VolumeObjectTO)vol.getData(); + PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)volumeTO.getDataStore(); + Pair volumeDsDetails = dataStoresDetails.get(primaryStore.getUuid()); + assert (volumeDsDetails != null); + + String[] diskChain = syncDiskChain(dcMo, vmMo, vmSpec, + vol, matchingExistingDisk, + dataStoresDetails); + if(controllerKey == scsiControllerKey && VmwareHelper.isReservedScsiDeviceNumber(scsiUnitNumber)) + scsiUnitNumber++; + VirtualDevice device = VmwareHelper.prepareDiskDevice(vmMo, null, controllerKey, + diskChain, + volumeDsDetails.first(), + (controllerKey == ideControllerKey) ? ideUnitNumber++ : scsiUnitNumber++, i + 1); + + deviceConfigSpecArray[i].setDevice(device); + deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD); + + if(s_logger.isDebugEnabled()) + s_logger.debug("Prepare volume at new device " + _gson.toJson(device)); + + i++; + } else { + if(controllerKey == scsiControllerKey && VmwareHelper.isReservedScsiDeviceNumber(scsiUnitNumber)) + scsiUnitNumber++; + if(controllerKey == ideControllerKey) + ideUnitNumber++; + else + scsiUnitNumber++; + } } // @@ -2887,7 +2904,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa nicCount++; } - vmConfigSpec.getDeviceChange().addAll(Arrays.asList(deviceConfigSpecArray)); + for(int j = 0; j < i; j++) + vmConfigSpec.getDeviceChange().add(deviceConfigSpecArray[j]); // // Setup VM options @@ -4439,7 +4457,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa return str.replace('/', '-'); } - private String trimIqn(String iqn) { + public static String trimIqn(String iqn) { String[] tmp = iqn.split("/"); if (tmp.length != 3) { @@ -4454,36 +4472,23 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } @Override - public ManagedObjectReference handleDatastoreAndVmdkAttach(Command cmd, String iqn, String storageHost, int storagePort, - String initiatorUsername, String initiatorPassword, String targetUsername, String targetPassword) throws Exception { + public void createVmdk(Command cmd, DatastoreMO dsMo, String vmdkDatastorePath, Long volumeSize) throws Exception { VmwareContext context = getServiceContext(); VmwareHypervisorHost hyperHost = getHyperHost(context); - ManagedObjectReference morDs = createVmfsDatastore(hyperHost, getDatastoreName(iqn), - storageHost, storagePort, trimIqn(iqn), - initiatorUsername, initiatorPassword, - targetUsername, targetPassword); + String dummyVmName = getWorkerName(context, cmd, 0); - DatastoreMO dsMo = new DatastoreMO(context, morDs); + VirtualMachineMO vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, dummyVmName); - String volumeDatastorePath = String.format("[%s] %s.vmdk", dsMo.getName(), dsMo.getName()); - - if (!dsMo.fileExists(volumeDatastorePath)) { - String dummyVmName = getWorkerName(context, cmd, 0); - - VirtualMachineMO vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, dummyVmName); - - if (vmMo == null) { - throw new Exception("Unable to create a dummy VM for volume creation"); - } - - vmMo.createDisk(volumeDatastorePath, 
getMBsFromBytes(dsMo.getSummary().getFreeSpace()), - morDs, vmMo.getScsiDeviceControllerKey()); - vmMo.detachDisk(volumeDatastorePath, false); - vmMo.destroy(); + if (vmMo == null) { + throw new Exception("Unable to create a dummy VM for volume creation"); } - return morDs; + Long volumeSizeToUse = volumeSize < dsMo.getSummary().getFreeSpace() ? volumeSize : dsMo.getSummary().getFreeSpace(); + + vmMo.createDisk(vmdkDatastorePath, getMBsFromBytes(volumeSizeToUse), dsMo.getMor(), vmMo.getScsiDeviceControllerKey()); + vmMo.detachDisk(vmdkDatastorePath, false); + vmMo.destroy(); } @Override @@ -4516,9 +4521,16 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa ManagedObjectReference morDs = null; if (cmd.getAttach() && cmd.isManaged()) { - morDs = handleDatastoreAndVmdkAttach(cmd, cmd.get_iScsiName(), cmd.getStorageHost(), cmd.getStoragePort(), - cmd.getChapInitiatorUsername(), cmd.getChapInitiatorPassword(), - cmd.getChapTargetUsername(), cmd.getChapTargetPassword()); + morDs = getVmfsDatastore(hyperHost, getDatastoreName(cmd.get_iScsiName()), cmd.getStorageHost(), cmd.getStoragePort(), trimIqn(cmd.get_iScsiName()), + cmd.getChapInitiatorUsername(), cmd.getChapInitiatorPassword(), cmd.getChapTargetUsername(), cmd.getChapTargetPassword()); + + DatastoreMO dsMo = new DatastoreMO(getServiceContext(), morDs); + + String volumeDatastorePath = String.format("[%s] %s.vmdk", dsMo.getName(), dsMo.getName()); + + if (!dsMo.fileExists(volumeDatastorePath)) { + createVmdk(cmd, dsMo, VmwareResource.getDatastoreName(cmd.get_iScsiName()), cmd.getVolumeSize()); + } } else { morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, cmd.getPoolUuid()); @@ -4531,10 +4543,18 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } DatastoreMO dsMo = new DatastoreMO(getServiceContext(), morDs); - VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dsMo.getOwnerDatacenter().first(), cmd.getVmName(), - dsMo, cmd.getVolumePath()); - - String datastoreVolumePath = dsMo.searchFileInSubFolders(cmd.getVolumePath() + ".vmdk", true); + + String datastoreVolumePath = null; + + if (cmd.isManaged()) { + datastoreVolumePath = dsMo.getDatastorePath(dsMo.getName() + ".vmdk"); + } + else { + VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dsMo.getOwnerDatacenter().first(), cmd.getVmName(), dsMo, cmd.getVolumePath()); + + datastoreVolumePath = dsMo.searchFileInSubFolders(cmd.getVolumePath() + ".vmdk", true); + } + assert (datastoreVolumePath != null) : "Virtual disk file must exist in specified datastore for attach/detach operations."; if (datastoreVolumePath == null) { throw new CloudRuntimeException("Unable to find file " + cmd.getVolumePath() + ".vmdk in datastore " + dsMo.getName()); @@ -4687,7 +4707,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } } - private ManagedObjectReference createVmfsDatastore(VmwareHypervisorHost hyperHost, String datastoreName, String storageIpAddress, + public ManagedObjectReference getVmfsDatastore(VmwareHypervisorHost hyperHost, String datastoreName, String storageIpAddress, int storagePortNumber, String iqn, String chapName, String chapSecret, String mutualChapName, String mutualChapSecret) throws Exception { VmwareContext context = getServiceContext(); ManagedObjectReference morCluster = hyperHost.getHyperHostCluster(); @@ -5410,7 +5430,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa // tear down all devices first before we 
destroy the VM to avoid accidently delete disk backing files if (getVmState(vmMo) != State.Stopped) vmMo.safePowerOff(_shutdown_waitMs); - vmMo.tearDownDevices(new Class[] { VirtualDisk.class, VirtualEthernetCard.class }); + vmMo.tearDownDevices(new Class[] { /* VirtualDisk.class, */ VirtualEthernetCard.class }); vmMo.destroy(); for (NetworkDetails netDetails : networks) { diff --git a/plugins/hypervisors/vmware/src/com/cloud/network/dao/CiscoNexusVSMDeviceDaoImpl.java b/plugins/hypervisors/vmware/src/com/cloud/network/dao/CiscoNexusVSMDeviceDaoImpl.java index cc25573dd2d..5379bba13ec 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/network/dao/CiscoNexusVSMDeviceDaoImpl.java +++ b/plugins/hypervisors/vmware/src/com/cloud/network/dao/CiscoNexusVSMDeviceDaoImpl.java @@ -30,7 +30,7 @@ import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; @Component -@Local(value=CiscoNexusVSMDeviceDao.class) @DB(txn=false) +@Local(value=CiscoNexusVSMDeviceDao.class) @DB public class CiscoNexusVSMDeviceDaoImpl extends GenericDaoBase implements CiscoNexusVSMDeviceDao { protected static final Logger s_logger = Logger.getLogger(CiscoNexusVSMDeviceDaoImpl.class); final SearchBuilder mgmtVlanIdSearch; diff --git a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareSecondaryStorageResourceHandler.java b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareSecondaryStorageResourceHandler.java index 2c302ab29fc..c84813f0b30 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareSecondaryStorageResourceHandler.java +++ b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareSecondaryStorageResourceHandler.java @@ -37,6 +37,7 @@ import com.cloud.hypervisor.vmware.manager.VmwareStorageManager; import com.cloud.hypervisor.vmware.manager.VmwareStorageManagerImpl; import com.cloud.hypervisor.vmware.manager.VmwareStorageMount; import com.cloud.hypervisor.vmware.mo.ClusterMO; +import com.cloud.hypervisor.vmware.mo.DatastoreMO; import com.cloud.hypervisor.vmware.mo.HostMO; import com.cloud.hypervisor.vmware.mo.VmwareHostType; import com.cloud.hypervisor.vmware.mo.VmwareHypervisorHost; @@ -347,8 +348,12 @@ public class VmwareSecondaryStorageResourceHandler implements SecondaryStorageRe return true; } - public ManagedObjectReference handleDatastoreAndVmdkAttach(Command cmd, String iqn, String storageHost, int storagePort, - String initiatorUsername, String initiatorPassword, String targetUsername, String targetPassword) throws Exception { + public ManagedObjectReference getVmfsDatastore(VmwareHypervisorHost hyperHost, String datastoreName, String storageIpAddress, int storagePortNumber, + String iqn, String initiatorChapName, String initiatorChapSecret, String mutualChapName, String mutualChapSecret) throws Exception { + throw new OperationNotSupportedException(); + } + + public void createVmdk(Command cmd, DatastoreMO dsMo, String volumeDatastorePath, Long volumeSize) throws Exception { throw new OperationNotSupportedException(); } diff --git a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java index 4982d879751..34bfe18cd84 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java +++ b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java @@ -26,22 +26,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; import 
java.util.UUID; - -import org.apache.commons.lang.StringUtils; -import org.apache.log4j.Logger; - -import com.google.gson.Gson; -import com.vmware.vim25.ManagedObjectReference; -import com.vmware.vim25.VirtualDeviceConfigSpec; -import com.vmware.vim25.VirtualDeviceConfigSpecOperation; -import com.vmware.vim25.VirtualDisk; -import com.vmware.vim25.VirtualEthernetCard; -import com.vmware.vim25.VirtualLsiLogicController; -import com.vmware.vim25.VirtualMachineConfigSpec; -import com.vmware.vim25.VirtualMachineFileInfo; -import com.vmware.vim25.VirtualMachineGuestOsIdentifier; -import com.vmware.vim25.VirtualSCSISharing; - import org.apache.cloudstack.storage.command.AttachAnswer; import org.apache.cloudstack.storage.command.AttachCommand; import org.apache.cloudstack.storage.command.CopyCmdAnswer; @@ -50,10 +34,14 @@ import org.apache.cloudstack.storage.command.CreateObjectAnswer; import org.apache.cloudstack.storage.command.CreateObjectCommand; import org.apache.cloudstack.storage.command.DeleteCommand; import org.apache.cloudstack.storage.command.DettachCommand; +import org.apache.cloudstack.storage.command.ForgetObjectCmd; +import org.apache.cloudstack.storage.command.IntroduceObjectCmd; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.cloudstack.storage.to.SnapshotObjectTO; import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.commons.lang.StringUtils; +import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.Command; @@ -88,10 +76,13 @@ import com.cloud.utils.Pair; import com.cloud.utils.Ternary; import com.cloud.utils.script.Script; import com.cloud.vm.VirtualMachine.State; +import com.google.gson.Gson; +import com.vmware.vim25.ManagedObjectReference; +import com.vmware.vim25.VirtualDisk; public class VmwareStorageProcessor implements StorageProcessor { private static final Logger s_logger = Logger.getLogger(VmwareStorageProcessor.class); - + private VmwareHostService hostService; private boolean _fullCloneFlag; private VmwareStorageMount mountService; @@ -128,9 +119,9 @@ public class VmwareStorageProcessor implements StorageProcessor { } return null; } - + private void copyTemplateFromSecondaryToPrimary(VmwareHypervisorHost hyperHost, DatastoreMO datastoreMo, String secondaryStorageUrl, - String templatePathAtSecondaryStorage, String templateName, String templateUuid) throws Exception { + String templatePathAtSecondaryStorage, String templateName, String templateUuid) throws Exception { s_logger.info("Executing copyTemplateFromSecondaryToPrimary. 
secondaryStorage: " + secondaryStorageUrl + ", templatePathAtSecondaryStorage: " + templatePathAtSecondaryStorage @@ -140,9 +131,9 @@ public class VmwareStorageProcessor implements StorageProcessor { s_logger.info("Secondary storage mount point: " + secondaryMountPoint); String srcOVAFileName = VmwareStorageLayoutHelper.getTemplateOnSecStorageFilePath( - secondaryMountPoint, templatePathAtSecondaryStorage, - templateName, ImageFormat.OVA.getFileExtension()); - + secondaryMountPoint, templatePathAtSecondaryStorage, + templateName, ImageFormat.OVA.getFileExtension()); + String srcFileName = getOVFFilePath(srcOVAFileName); if(srcFileName == null) { Script command = new Script("tar", 0, s_logger); @@ -178,8 +169,8 @@ public class VmwareStorageProcessor implements StorageProcessor { } if(vmMo.createSnapshot("cloud.template.base", "Base snapshot", false, false)) { - // the same template may be deployed with multiple copies at per-datastore per-host basis, - // save the original template name from CloudStack DB as the UUID to associate them. + // the same template may be deployed with multiple copies at per-datastore per-host basis, + // save the original template name from CloudStack DB as the UUID to associate them. vmMo.setCustomFieldValue(CustomFieldConstants.CLOUD_UUID, templateName); vmMo.markAsTemplate(); } else { @@ -197,7 +188,7 @@ public class VmwareStorageProcessor implements StorageProcessor { DataStoreTO srcStore = srcData.getDataStore(); if (!(srcStore instanceof NfsTO)) { return new CopyCmdAnswer("unsupported protocol"); - } + } NfsTO nfsImageStore = (NfsTO)srcStore; DataTO destData = cmd.getDestTO(); DataStoreTO destStore = destData.getDataStore(); @@ -206,9 +197,9 @@ public class VmwareStorageProcessor implements StorageProcessor { assert (secondaryStorageUrl != null); String templateUrl = secondaryStorageUrl + "/" + srcData.getPath(); - + Pair templateInfo = VmwareStorageLayoutHelper.decodeTemplateRelativePathAndNameFromUrl( - secondaryStorageUrl, templateUrl, template.getName()); + secondaryStorageUrl, templateUrl, template.getName()); VmwareContext context = hostService.getServiceContext(cmd); try { @@ -246,7 +237,7 @@ public class VmwareStorageProcessor implements StorageProcessor { return new CopyCmdAnswer(msg); } } - + private boolean createVMLinkedClone(VirtualMachineMO vmTemplate, DatacenterMO dcMo, DatastoreMO dsMo, String vmdkName, ManagedObjectReference morDatastore, ManagedObjectReference morPool) throws Exception { @@ -265,16 +256,16 @@ public class VmwareStorageProcessor implements StorageProcessor { } s_logger.info("Move volume out of volume-wrapper VM "); - String[] vmwareLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, - vmdkName, vmdkName, VmwareStorageLayoutType.VMWARE, true); - String[] legacyCloudStackLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, - vmdkName, vmdkName, VmwareStorageLayoutType.CLOUDSTACK_LEGACY, true); - + String[] vmwareLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, + vmdkName, vmdkName, VmwareStorageLayoutType.VMWARE, true); + String[] legacyCloudStackLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, + vmdkName, vmdkName, VmwareStorageLayoutType.CLOUDSTACK_LEGACY, true); + dsMo.moveDatastoreFile(vmwareLayoutFilePair[0], dcMo.getMor(), dsMo.getMor(), legacyCloudStackLayoutFilePair[0], dcMo.getMor(), true); - + dsMo.moveDatastoreFile(vmwareLayoutFilePair[1], dcMo.getMor(), dsMo.getMor(), legacyCloudStackLayoutFilePair[1], @@ 
-292,18 +283,18 @@ public class VmwareStorageProcessor implements StorageProcessor { s_logger.error(msg); throw new Exception(msg); } - + s_logger.info("Move volume out of volume-wrapper VM "); - String[] vmwareLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, - vmdkName, vmdkName, VmwareStorageLayoutType.VMWARE, false); - String[] legacyCloudStackLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, - vmdkName, vmdkName, VmwareStorageLayoutType.CLOUDSTACK_LEGACY, false); - + String[] vmwareLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, + vmdkName, vmdkName, VmwareStorageLayoutType.VMWARE, false); + String[] legacyCloudStackLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, + vmdkName, vmdkName, VmwareStorageLayoutType.CLOUDSTACK_LEGACY, false); + dsMo.moveDatastoreFile(vmwareLayoutFilePair[0], dcMo.getMor(), dsMo.getMor(), legacyCloudStackLayoutFilePair[0], dcMo.getMor(), true); - + dsMo.moveDatastoreFile(vmwareLayoutFilePair[1], dcMo.getMor(), dsMo.getMor(), legacyCloudStackLayoutFilePair[1], @@ -343,17 +334,17 @@ public class VmwareStorageProcessor implements StorageProcessor { throw new Exception("Unable to create a dummy VM for volume creation"); } - String vmdkFilePair[] = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, null, vmdkName, - VmwareStorageLayoutType.CLOUDSTACK_LEGACY, - true // we only use the first file in the pair, linked or not will not matter - ); + String vmdkFilePair[] = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, null, vmdkName, + VmwareStorageLayoutType.CLOUDSTACK_LEGACY, + true // we only use the first file in the pair, linked or not will not matter + ); String volumeDatastorePath = vmdkFilePair[0]; synchronized (this) { s_logger.info("Delete file if exists in datastore to clear the way for creating the volume. 
file: " + volumeDatastorePath); VmwareStorageLayoutHelper.deleteVolumeVmdkFiles(dsMo, vmdkName, dcMo); vmMo.createDisk(volumeDatastorePath, (int) (volume.getSize() / (1024L * 1024L)), morDatastore, -1); vmMo.detachDisk(volumeDatastorePath, false); - } + } VolumeObjectTO newVol = new VolumeObjectTO(); newVol.setPath(vmdkName); @@ -506,7 +497,7 @@ public class VmwareStorageProcessor implements StorageProcessor { try { ManagedObjectReference morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, poolId); - + if (morDs == null) { String msg = "Unable to find volumes's storage pool for copy volume operation"; s_logger.error(msg); @@ -518,7 +509,7 @@ public class VmwareStorageProcessor implements StorageProcessor { // create a dummy worker vm for attaching the volume DatastoreMO dsMo = new DatastoreMO(hyperHost.getContext(), morDs); workerVm = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, workerVmName); - + if (workerVm == null) { String msg = "Unable to create worker VM to execute CopyVolumeCommand"; s_logger.error(msg); @@ -657,7 +648,7 @@ public class VmwareStorageProcessor implements StorageProcessor { Pair cloneResult = vmMo.cloneFromCurrentSnapshot(workerVmName, 0, 4, volumeDeviceInfo.second(), VmwareHelper.getDiskDeviceDatastore(volumeDeviceInfo.first())); clonedVm = cloneResult.first(); - + clonedVm.exportVm(secondaryMountPoint + "/" + installPath, templateUniqueName, true, false); long physicalSize = new File(installFullPath + "/" + templateUniqueName + ".ova").length(); @@ -960,7 +951,7 @@ public class VmwareStorageProcessor implements StorageProcessor { throw new Exception("unable to prepare snapshot backup directory"); } } - } + } VirtualMachineMO clonedVm = null; try { @@ -974,7 +965,7 @@ public class VmwareStorageProcessor implements StorageProcessor { // 4 MB is the minimum requirement for VM memory in VMware Pair cloneResult = vmMo.cloneFromCurrentSnapshot(workerVmName, 0, 4, volumeDeviceInfo.second(), - VmwareHelper.getDiskDeviceDatastore(volumeDeviceInfo.first())); + VmwareHelper.getDiskDeviceDatastore(volumeDeviceInfo.first())); clonedVm = cloneResult.first(); String disks[] = cloneResult.second(); @@ -998,7 +989,7 @@ public class VmwareStorageProcessor implements StorageProcessor { installPath, backupUuid, workerVmName); return new Ternary(backupUuid + "/" + backupUuid, snapshotInfo.first(), snapshotInfo.second()); } - + @Override public Answer backupSnapshot(CopyCommand cmd) { SnapshotObjectTO srcSnapshot = (SnapshotObjectTO)cmd.getSrcTO(); @@ -1025,7 +1016,7 @@ public class VmwareStorageProcessor implements StorageProcessor { String details = null; boolean success = false; String snapshotBackupUuid = null; - + boolean hasOwnerVm = false; Ternary backupResult = null; @@ -1037,7 +1028,7 @@ public class VmwareStorageProcessor implements StorageProcessor { morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, primaryStore.getUuid()); CopyCmdAnswer answer = null; - + try { vmMo = hyperHost.findVmOnHyperHost(vmName); if (vmMo == null) { @@ -1050,7 +1041,7 @@ public class VmwareStorageProcessor implements StorageProcessor { dsMo = new DatastoreMO(hyperHost.getContext(), morDs); workerVMName = hostService.getWorkerName(context, cmd, 0); - + vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, workerVMName); if (vmMo == null) { @@ -1062,12 +1053,12 @@ public class VmwareStorageProcessor implements StorageProcessor { String datastoreVolumePath = dsMo.getDatastorePath(volumePath + ".vmdk"); vmMo.attachDisk(new String[] { 
datastoreVolumePath }, morDs); } else { - s_logger.info("Using owner VM " + vmName + " for snapshot operation"); - hasOwnerVm = true; + s_logger.info("Using owner VM " + vmName + " for snapshot operation"); + hasOwnerVm = true; } } else { - s_logger.info("Using owner VM " + vmName + " for snapshot operation"); - hasOwnerVm = true; + s_logger.info("Using owner VM " + vmName + " for snapshot operation"); + hasOwnerVm = true; } if (!vmMo.createSnapshot(snapshotUuid, "Snapshot taken for " + srcSnapshot.getName(), false, false)) { @@ -1093,52 +1084,52 @@ public class VmwareStorageProcessor implements StorageProcessor { ManagedObjectReference snapshotMor = vmMo.getSnapshotMor(snapshotUuid); if (snapshotMor != null) { vmMo.removeSnapshot(snapshotUuid, false); - + // Snapshot operation may cause disk consolidation in VMware, when this happens // we need to update CloudStack DB // // TODO: this post operation fixup is not atomic and not safe when management server stops // in the middle if(backupResult != null && hasOwnerVm) { - s_logger.info("Check if we have disk consolidation after snapshot operation"); - - boolean chainConsolidated = false; - for(String vmdkDsFilePath : backupResult.third()) { - s_logger.info("Validate disk chain file:" + vmdkDsFilePath); - - if(vmMo.getDiskDevice(vmdkDsFilePath, false) == null) { - s_logger.info("" + vmdkDsFilePath + " no longer exists, consolidation detected"); - chainConsolidated = true; - break; - } else { - s_logger.info("" + vmdkDsFilePath + " is found still in chain"); - } - } - - if(chainConsolidated) { - String topVmdkFilePath = null; - try { - topVmdkFilePath = vmMo.getDiskCurrentTopBackingFileInChain(backupResult.second()); - } catch(Exception e) { - s_logger.error("Unexpected exception", e); - } - - s_logger.info("Disk has been consolidated, top VMDK is now: " + topVmdkFilePath); - if(topVmdkFilePath != null) { - DatastoreFile file = new DatastoreFile(topVmdkFilePath); - - SnapshotObjectTO snapshotInfo = (SnapshotObjectTO)answer.getNewData(); - VolumeObjectTO vol = new VolumeObjectTO(); - vol.setUuid(srcSnapshot.getVolume().getUuid()); - vol.setPath(file.getFileBaseName()); - snapshotInfo.setVolume(vol); - } else { - s_logger.error("Disk has been consolidated, but top VMDK is not found ?!"); - } - } + s_logger.info("Check if we have disk consolidation after snapshot operation"); + + boolean chainConsolidated = false; + for(String vmdkDsFilePath : backupResult.third()) { + s_logger.info("Validate disk chain file:" + vmdkDsFilePath); + + if(vmMo.getDiskDevice(vmdkDsFilePath, false) == null) { + s_logger.info("" + vmdkDsFilePath + " no longer exists, consolidation detected"); + chainConsolidated = true; + break; + } else { + s_logger.info("" + vmdkDsFilePath + " is found still in chain"); + } + } + + if(chainConsolidated) { + String topVmdkFilePath = null; + try { + topVmdkFilePath = vmMo.getDiskCurrentTopBackingFileInChain(backupResult.second()); + } catch(Exception e) { + s_logger.error("Unexpected exception", e); + } + + s_logger.info("Disk has been consolidated, top VMDK is now: " + topVmdkFilePath); + if(topVmdkFilePath != null) { + DatastoreFile file = new DatastoreFile(topVmdkFilePath); + + SnapshotObjectTO snapshotInfo = (SnapshotObjectTO)answer.getNewData(); + VolumeObjectTO vol = new VolumeObjectTO(); + vol.setUuid(srcSnapshot.getVolume().getUuid()); + vol.setPath(file.getFileBaseName()); + snapshotInfo.setVolume(vol); + } else { + s_logger.error("Disk has been consolidated, but top VMDK is not found ?!"); + } + } } } else { - 
s_logger.error("Can not find the snapshot we just used ?!"); + s_logger.error("Can not find the snapshot we just used ?!"); } } @@ -1152,7 +1143,7 @@ public class VmwareStorageProcessor implements StorageProcessor { s_logger.warn("Failed to destroy worker VM: " + workerVMName); } } - + return answer; } catch (Throwable e) { if (e instanceof RemoteException) { @@ -1200,12 +1191,20 @@ public class VmwareStorageProcessor implements StorageProcessor { ManagedObjectReference morDs = null; if (isAttach && isManaged) { - morDs = hostService.handleDatastoreAndVmdkAttach(cmd, iScsiName, storageHost, storagePort, - initiatorUsername, initiatorPassword, targetUsername, targetPassword); + morDs = hostService.getVmfsDatastore(hyperHost, VmwareResource.getDatastoreName(iScsiName), storageHost, storagePort, + VmwareResource.trimIqn(iScsiName), initiatorUsername, initiatorPassword, targetUsername, targetPassword); + + DatastoreMO dsMo = new DatastoreMO(hostService.getServiceContext(null), morDs); + + String volumeDatastorePath = String.format("[%s] %s.vmdk", dsMo.getName(), dsMo.getName()); + + if (!dsMo.fileExists(volumeDatastorePath)) { + hostService.createVmdk(cmd, dsMo, VmwareResource.getDatastoreName(iScsiName), volumeTO.getSize()); + } } else { morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, isManaged ? VmwareResource.getDatastoreName(iScsiName) : primaryStore.getUuid()); - } + } if (morDs == null) { String msg = "Unable to find the mounted datastore to execute AttachVolumeCommand, vmName: " + vmName; @@ -1216,31 +1215,42 @@ public class VmwareStorageProcessor implements StorageProcessor { DatastoreMO dsMo = new DatastoreMO(this.hostService.getServiceContext(null), morDs); String datastoreVolumePath; - if(isAttach) { - if(!isManaged) - datastoreVolumePath = VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dsMo.getOwnerDatacenter().first(), vmName, - dsMo, volumeTO.getPath()); - else - datastoreVolumePath = dsMo.getDatastorePath(dsMo.getName() + ".vmdk"); - } else { - datastoreVolumePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(dsMo, volumeTO.getPath() + ".vmdk"); - if(!dsMo.fileExists(datastoreVolumePath)) - datastoreVolumePath = VmwareStorageLayoutHelper.getVmwareDatastorePathFromVmdkFileName(dsMo, vmName, volumeTO.getPath() + ".vmdk"); + if (isAttach) { + if (isManaged) { + datastoreVolumePath = dsMo.getDatastorePath(dsMo.getName() + ".vmdk"); + } + else { + datastoreVolumePath = VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dsMo.getOwnerDatacenter().first(), vmName, dsMo, volumeTO.getPath()); + } } - + else { + if (isManaged) { + datastoreVolumePath = dsMo.getDatastorePath(dsMo.getName() + ".vmdk"); + } + else { + datastoreVolumePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(dsMo, volumeTO.getPath() + ".vmdk"); + + if (!dsMo.fileExists(datastoreVolumePath)) { + datastoreVolumePath = VmwareStorageLayoutHelper.getVmwareDatastorePathFromVmdkFileName(dsMo, vmName, volumeTO.getPath() + ".vmdk"); + } + } + } + disk.setVdiUuid(datastoreVolumePath); AttachAnswer answer = new AttachAnswer(disk); + if (isAttach) { vmMo.attachDisk(new String[] { datastoreVolumePath }, morDs); - } else { + } + else { vmMo.removeAllSnapshots(); vmMo.detachDisk(datastoreVolumePath, false); if (isManaged) { this.hostService.handleDatastoreAndVmdkDetach(iScsiName, storageHost, storagePort); } else { - VmwareStorageLayoutHelper.syncVolumeToRootFolder(dsMo.getOwnerDatacenter().first(), dsMo, volumeTO.getPath()); + 
VmwareStorageLayoutHelper.syncVolumeToRootFolder(dsMo.getOwnerDatacenter().first(), dsMo, volumeTO.getPath()); } } @@ -1274,7 +1284,7 @@ public class VmwareStorageProcessor implements StorageProcessor { return morDatastore; } - + private Answer attachIso(DiskTO disk, boolean isAttach, String vmName) { try { VmwareHypervisorHost hyperHost = hostService.getHyperHost(hostService.getServiceContext(null), null); @@ -1387,7 +1397,7 @@ public class VmwareStorageProcessor implements StorageProcessor { String volumeDatastorePath = dsMo.getDatastorePath(volumeUuid + ".vmdk"); String dummyVmName = this.hostService.getWorkerName(context, cmd, 0); try { - s_logger.info("Create worker VM " + dummyVmName); + s_logger.info("Create worker VM " + dummyVmName); vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, dummyVmName); if (vmMo == null) { throw new Exception("Unable to create a dummy VM for volume creation"); @@ -1408,8 +1418,8 @@ public class VmwareStorageProcessor implements StorageProcessor { } finally { s_logger.info("Destroy dummy VM after volume creation"); if(vmMo != null) { - vmMo.detachAllDisks(); - vmMo.destroy(); + vmMo.detachAllDisks(); + vmMo.destroy(); } } } catch (Throwable e) { @@ -1460,7 +1470,7 @@ public class VmwareStorageProcessor implements StorageProcessor { ClusterMO clusterMo = new ClusterMO(context, morCluster); if (vol.getVolumeType() == Volume.Type.ROOT) { - + String vmName = vol.getVmName(); if (vmName != null) { VirtualMachineMO vmMo = clusterMo.findVmOnHyperHost(vmName); @@ -1471,12 +1481,12 @@ public class VmwareStorageProcessor implements StorageProcessor { // Remove all snapshots to consolidate disks for removal vmMo.removeAllSnapshots(); - + VirtualMachineDiskInfo diskInfo = null; if(vol.getChainInfo() != null) - diskInfo = _gson.fromJson(vol.getChainInfo(), VirtualMachineDiskInfo.class); - - + diskInfo = _gson.fromJson(vol.getChainInfo(), VirtualMachineDiskInfo.class); + + HostMO hostMo = vmMo.getRunningHost(); List networks = vmMo.getNetworksWithDetails(); @@ -1484,7 +1494,7 @@ public class VmwareStorageProcessor implements StorageProcessor { if (this.resource.getVmState(vmMo) != State.Stopped) { vmMo.safePowerOff(_shutdown_waitMs); } - + List detachedDisks = vmMo.detachAllDisksExcept(vol.getPath(), diskInfo != null ? 
diskInfo.getDiskDeviceBusName() : null); VmwareStorageLayoutHelper.moveVolumeToRootFolder(new DatacenterMO(context, morDc), detachedDisks); @@ -1501,13 +1511,13 @@ } } -/* + /* if (s_logger.isInfoEnabled()) { s_logger.info("Destroy volume by original name: " + vol.getPath() + ".vmdk"); } VmwareStorageLayoutHelper.deleteVolumeVmdkFiles(dsMo, vol.getPath(), new DatacenterMO(context, morDc)); -*/ + */ return new Answer(cmd, true, "Success"); } @@ -1527,8 +1537,8 @@ } } - VmwareStorageLayoutHelper.deleteVolumeVmdkFiles(dsMo, vol.getPath(), new DatacenterMO(context, morDc)); - + VmwareStorageLayoutHelper.deleteVolumeVmdkFiles(dsMo, vol.getPath(), new DatacenterMO(context, morDc)); + return new Answer(cmd, true, "Success"); } catch (Throwable e) { if (e instanceof RemoteException) { @@ -1672,10 +1682,20 @@ return new Answer(cmd, false, "unsupported command"); } } - + + @Override + public Answer introduceObject(IntroduceObjectCmd cmd) { + return new Answer(cmd, false, "not implemented yet"); + } + + @Override + public Answer forgetObject(ForgetObjectCmd cmd) { + return new Answer(cmd, false, "not implemented yet"); + } + private static String deriveTemplateUuidOnHost(VmwareHypervisorHost hyperHost, String storeIdentifier, String templateName) { - String templateUuid = UUID.nameUUIDFromBytes((templateName + "@" + storeIdentifier + "-" + hyperHost.getMor().getValue()).getBytes()).toString(); - templateUuid = templateUuid.replaceAll("-", ""); - return templateUuid; + String templateUuid = UUID.nameUUIDFromBytes((templateName + "@" + storeIdentifier + "-" + hyperHost.getMor().getValue()).getBytes()).toString(); + templateUuid = templateUuid.replaceAll("-", ""); + return templateUuid; } } diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java index 6b81c25c303..92fbab28bd2 100644 --- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java +++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java @@ -5348,7 +5348,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe if (pool.getType() == StoragePoolType.NetworkFilesystem) { getNfsSR(conn, pool); } else if (pool.getType() == StoragePoolType.IscsiLUN) { - getIscsiSR(conn, pool.getUuid(), pool.getHost(), pool.getPath(), null, null, new Boolean[1]); + getIscsiSR(conn, pool.getUuid(), pool.getHost(), pool.getPath(), null, null); } else if (pool.getType() == StoragePoolType.PreSetup) { } else { return new Answer(cmd, false, "The pool type: " + pool.getType().name() + " is not supported."); } @@ -6166,17 +6166,27 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } protected VDI getVDIbyUuid(Connection conn, String uuid) { + return getVDIbyUuid(conn, uuid, true); + } + + protected VDI getVDIbyUuid(Connection conn, String uuid, boolean throwExceptionIfNotFound) { try { return VDI.getByUuid(conn, uuid); } catch (Exception e) { - String msg = "Catch Exception " + e.getClass().getName() + " :VDI getByUuid for uuid: " + uuid + " failed due to " + e.toString(); - s_logger.debug(msg); - throw new CloudRuntimeException(msg, e); + if (throwExceptionIfNotFound) { + String msg = "Catch Exception " + 
e.getClass().getName() + " :VDI getByUuid for uuid: " + uuid + " failed due to " + e.toString(); + + s_logger.debug(msg); + + throw new CloudRuntimeException(msg, e); + } + + return null; } } protected SR getIscsiSR(Connection conn, String srNameLabel, String target, String path, - String chapInitiatorUsername, String chapInitiatorPassword, Boolean[] created) { + String chapInitiatorUsername, String chapInitiatorPassword) { synchronized (srNameLabel.intern()) { Map deviceConfig = new HashMap(); try { @@ -6280,8 +6290,6 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe { sr = SR.create(conn, host, deviceConfig, new Long(0), srNameLabel, srNameLabel, type, "user", true, smConfig); - - created[0] = true; // note that the SR was created (as opposed to introduced) } else { sr = SR.introduce(conn, pooluuid, srNameLabel, srNameLabel, type, "user", true, smConfig); @@ -6459,54 +6467,41 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } } - protected VDI handleSrAndVdiAttach(String iqn, String storageHostName, - String chapInitiatorName, String chapInitiatorPassword) throws Types.XenAPIException, XmlRpcException { + protected VDI createVdi(SR sr, String vdiNameLabel, Long volumeSize) throws Types.XenAPIException, XmlRpcException { VDI vdi = null; Connection conn = getConnection(); - Boolean[] created = { false }; + VDI.Record vdir = new VDI.Record(); - SR sr = getIscsiSR(conn, iqn, - storageHostName, iqn, - chapInitiatorName, chapInitiatorPassword, created); + vdir.nameLabel = vdiNameLabel; + vdir.SR = sr; + vdir.type = Types.VdiType.USER; - // if created[0] is true, this means the SR was actually created...as opposed to introduced - if (created[0]) { - VDI.Record vdir = new VDI.Record(); - - vdir.nameLabel = iqn; - vdir.SR = sr; - vdir.type = Types.VdiType.USER; - - long totalSpace = sr.getPhysicalSize(conn); - long unavailableSpace = sr.getPhysicalUtilisation(conn); - - vdir.virtualSize = totalSpace - unavailableSpace; - - if (vdir.virtualSize < 0) { - throw new CloudRuntimeException("VDI virtual size cannot be less than 0."); - } - - long maxNumberOfTries = (totalSpace / unavailableSpace >= 1) ? (totalSpace / unavailableSpace) : 1; - long tryNumber = 0; - - while (tryNumber <= maxNumberOfTries) { - try { - vdi = VDI.create(conn, vdir); - - break; - } - catch (Exception ex) { - tryNumber++; - - vdir.virtualSize -= unavailableSpace; - } - } + long totalSrSpace = sr.getPhysicalSize(conn); + long unavailableSrSpace = sr.getPhysicalUtilisation(conn); + long availableSrSpace = totalSrSpace - unavailableSrSpace; + if (availableSrSpace < volumeSize) { + throw new CloudRuntimeException("Available space for SR cannot be less than " + volumeSize + "."); } - else { - vdi = sr.getVDIs(conn).iterator().next(); + + vdir.virtualSize = volumeSize; + + long maxNumberOfTries = (totalSrSpace / unavailableSrSpace >= 1) ? 
(totalSrSpace / unavailableSrSpace) : 1; + long tryNumber = 0; + + while (tryNumber <= maxNumberOfTries) { + try { + vdi = VDI.create(conn, vdir); + + break; + } + catch (Exception ex) { + tryNumber++; + + vdir.virtualSize -= unavailableSrSpace; + } } return vdi; @@ -6534,12 +6529,17 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } try { - // Look up the VDI VDI vdi = null; if (cmd.getAttach() && cmd.isManaged()) { - vdi = handleSrAndVdiAttach(cmd.get_iScsiName(), cmd.getStorageHost(), - cmd.getChapInitiatorUsername(), cmd.getChapInitiatorPassword()); + SR sr = getIscsiSR(conn, cmd.get_iScsiName(), cmd.getStorageHost(), cmd.get_iScsiName(), + cmd.getChapInitiatorUsername(), cmd.getChapInitiatorPassword()); + + vdi = getVDIbyUuid(conn, cmd.getVolumePath(), false); + + if (vdi == null) { + vdi = createVdi(sr, cmd.get_iScsiName(), cmd.getVolumeSize()); + } } else { vdi = getVDIbyUuid(conn, cmd.getVolumePath()); diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerStorageProcessor.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerStorageProcessor.java index 739b9743f44..2d4c86e86dd 100644 --- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerStorageProcessor.java +++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerStorageProcessor.java @@ -18,6 +18,36 @@ */ package com.cloud.hypervisor.xen.resource; +import java.io.File; +import java.net.URI; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; + +import org.apache.cloudstack.storage.command.AttachAnswer; +import org.apache.cloudstack.storage.command.AttachCommand; +import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreAnswer; +import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreCmd; +import org.apache.cloudstack.storage.command.CopyCmdAnswer; +import org.apache.cloudstack.storage.command.CopyCommand; +import org.apache.cloudstack.storage.command.CreateObjectAnswer; +import org.apache.cloudstack.storage.command.CreateObjectCommand; +import org.apache.cloudstack.storage.command.DeleteCommand; +import org.apache.cloudstack.storage.command.DettachAnswer; +import org.apache.cloudstack.storage.command.DettachCommand; +import org.apache.cloudstack.storage.command.ForgetObjectCmd; +import org.apache.cloudstack.storage.command.IntroduceObjectAnswer; +import org.apache.cloudstack.storage.command.IntroduceObjectCmd; +import org.apache.cloudstack.storage.datastore.protocol.DataStoreProtocol; +import org.apache.cloudstack.storage.to.SnapshotObjectTO; +import org.apache.cloudstack.storage.to.TemplateObjectTO; +import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.log4j.Logger; +import org.apache.xmlrpc.XmlRpcException; + import com.cloud.agent.api.Answer; import com.cloud.agent.api.CreateStoragePoolCommand; import com.cloud.agent.api.to.DataObjectType; @@ -51,33 +81,6 @@ import com.xensource.xenapi.VBD; import com.xensource.xenapi.VDI; import com.xensource.xenapi.VM; import com.xensource.xenapi.VMGuestMetrics; -import org.apache.cloudstack.storage.command.AttachAnswer; -import org.apache.cloudstack.storage.command.AttachCommand; -import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreAnswer; -import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreCmd; -import org.apache.cloudstack.storage.command.CopyCmdAnswer; -import 
org.apache.cloudstack.storage.command.CopyCommand; -import org.apache.cloudstack.storage.command.CreateObjectAnswer; -import org.apache.cloudstack.storage.command.CreateObjectCommand; -import org.apache.cloudstack.storage.command.DeleteCommand; -import org.apache.cloudstack.storage.command.DettachAnswer; -import org.apache.cloudstack.storage.command.DettachCommand; -import org.apache.cloudstack.storage.datastore.protocol.DataStoreProtocol; -import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; -import org.apache.cloudstack.storage.to.SnapshotObjectTO; -import org.apache.cloudstack.storage.to.TemplateObjectTO; -import org.apache.cloudstack.storage.to.VolumeObjectTO; -import org.apache.log4j.Logger; -import org.apache.xmlrpc.XmlRpcException; - -import java.io.File; -import java.net.URI; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; import static com.cloud.utils.ReflectUtil.flattenProperties; import static com.google.common.collect.Lists.newArrayList; @@ -164,12 +167,20 @@ public class XenServerStorageProcessor implements StorageProcessor { try { Connection conn = this.hypervisorResource.getConnection(); - // Look up the VDI + VDI vdi = null; if (cmd.isManaged()) { - vdi = this.hypervisorResource.handleSrAndVdiAttach(cmd.get_iScsiName(), cmd.getStorageHost(), - cmd.getChapInitiatorUsername(), cmd.getChapInitiatorPassword()); + SR sr = this.hypervisorResource.getIscsiSR(conn, cmd.get_iScsiName(), cmd.getStorageHost(), cmd.get_iScsiName(), + cmd.getChapInitiatorUsername(), cmd.getChapInitiatorPassword()); + + vdi = this.hypervisorResource.getVDIbyUuid(conn, data.getPath(), false); + + if (vdi == null) { + VolumeObjectTO volume = (VolumeObjectTO)data; + + vdi = this.hypervisorResource.createVdi(sr, cmd.get_iScsiName(), volume.getSize()); + } } else { vdi = this.hypervisorResource.mount(conn, null, null, data.getPath()); @@ -841,8 +852,7 @@ public class XenServerStorageProcessor implements StorageProcessor { URI uri = new URI(storeUrl); String tmplpath = uri.getHost() + ":" + uri.getPath() + "/" + srcData.getPath(); - PrimaryDataStoreTO destStore = (PrimaryDataStoreTO)destData.getDataStore(); - String poolName = destStore.getUuid(); + String poolName = destData.getDataStore().getUuid(); Connection conn = hypervisorResource.getConnection(); SR poolsr = null; @@ -892,8 +902,7 @@ public class XenServerStorageProcessor implements StorageProcessor { try { Connection conn = hypervisorResource.getConnection(); - PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)data.getDataStore(); - SR poolSr = hypervisorResource.getStorageRepository(conn, primaryStore.getUuid()); + SR poolSr = hypervisorResource.getStorageRepository(conn, data.getDataStore().getUuid()); VDI.Record vdir = new VDI.Record(); vdir.nameLabel = volume.getName(); vdir.SR = poolSr; @@ -921,7 +930,6 @@ public class XenServerStorageProcessor implements StorageProcessor { Connection conn = hypervisorResource.getConnection(); DataTO srcData = cmd.getSrcTO(); DataTO destData = cmd.getDestTO(); - PrimaryDataStoreTO pool = (PrimaryDataStoreTO)destData.getDataStore(); VolumeObjectTO volume = (VolumeObjectTO)destData; VDI vdi = null; try { @@ -943,7 +951,7 @@ public class XenServerStorageProcessor implements StorageProcessor { return new CopyCmdAnswer(newVol); } catch (Exception e) { - s_logger.warn("Unable to create volume; Pool=" + pool + "; Disk: ", e); + s_logger.warn("Unable to create volume; Pool=" + destData + "; Disk: ", e); return 
new CopyCmdAnswer(e.toString()); } } @@ -956,13 +964,12 @@ public class XenServerStorageProcessor implements StorageProcessor { int wait = cmd.getWait(); VolumeObjectTO srcVolume = (VolumeObjectTO)srcData; VolumeObjectTO destVolume = (VolumeObjectTO)destData; - PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)destVolume.getDataStore(); DataStoreTO srcStore = srcVolume.getDataStore(); if (srcStore instanceof NfsTO) { NfsTO nfsStore = (NfsTO)srcStore; try { - SR primaryStoragePool = hypervisorResource.getStorageRepository(conn, primaryStore.getUuid()); + SR primaryStoragePool = hypervisorResource.getStorageRepository(conn, destVolume.getDataStore().getUuid()); String srUuid = primaryStoragePool.getUuid(conn); URI uri = new URI(nfsStore.getUrl()); String volumePath = uri.getHost() + ":" + uri.getPath() + File.separator + srcVolume.getPath(); @@ -1179,8 +1186,7 @@ public class XenServerStorageProcessor implements StorageProcessor { DataTO cacheData = cmd.getCacheTO(); DataTO destData = cmd.getDestTO(); int wait = cmd.getWait(); - PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)srcData.getDataStore(); - String primaryStorageNameLabel = primaryStore.getUuid(); + String primaryStorageNameLabel = srcData.getDataStore().getUuid(); String secondaryStorageUrl = null; NfsTO cacheStore = null; String destPath = null; @@ -1415,7 +1421,6 @@ public class XenServerStorageProcessor implements StorageProcessor { DataTO srcData = cmd.getSrcTO(); SnapshotObjectTO snapshot = (SnapshotObjectTO)srcData; DataTO destData = cmd.getDestTO(); - PrimaryDataStoreTO pool = (PrimaryDataStoreTO)destData.getDataStore(); DataStoreTO imageStore = srcData.getDataStore(); if (!(imageStore instanceof NfsTO)) { @@ -1423,7 +1428,7 @@ public class XenServerStorageProcessor implements StorageProcessor { } NfsTO nfsImageStore = (NfsTO)imageStore; - String primaryStorageNameLabel = pool.getUuid(); + String primaryStorageNameLabel = destData.getDataStore().getUuid(); String secondaryStorageUrl = nfsImageStore.getUrl(); int wait = cmd.getWait(); boolean result = false; @@ -1503,4 +1508,32 @@ public class XenServerStorageProcessor implements StorageProcessor { } return new Answer(cmd, false, "unsupported storage type"); } + + @Override + public Answer introduceObject(IntroduceObjectCmd cmd) { + try { + Connection conn = hypervisorResource.getConnection(); + DataStoreTO store = cmd.getDataTO().getDataStore(); + SR poolSr = hypervisorResource.getStorageRepository(conn, store.getUuid()); + poolSr.scan(conn); + return new IntroduceObjectAnswer(cmd.getDataTO()); + } catch (Exception e) { + s_logger.debug("Failed to introduce object", e); + return new Answer(cmd, false, e.toString()); + } + } + + @Override + public Answer forgetObject(ForgetObjectCmd cmd) { + try { + Connection conn = hypervisorResource.getConnection(); + DataTO data = cmd.getDataTO(); + VDI vdi = VDI.getByUuid(conn, data.getPath()); + vdi.forget(conn); + return new IntroduceObjectAnswer(cmd.getDataTO()); + } catch (Exception e) { + s_logger.debug("Failed to forget object", e); + return new Answer(cmd, false, e.toString()); + } + } } diff --git a/plugins/network-elements/netscaler/src/com/cloud/network/dao/NetScalerPodDaoImpl.java b/plugins/network-elements/netscaler/src/com/cloud/network/dao/NetScalerPodDaoImpl.java index 30dd06db1aa..e9af1066da0 100644 --- a/plugins/network-elements/netscaler/src/com/cloud/network/dao/NetScalerPodDaoImpl.java +++ b/plugins/network-elements/netscaler/src/com/cloud/network/dao/NetScalerPodDaoImpl.java @@ -30,7 +30,7 @@ import
com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; @Component -@Local(value=NetScalerPodDao.class) @DB(txn=false) +@Local(value=NetScalerPodDao.class) @DB public class NetScalerPodDaoImpl extends GenericDaoBase implements NetScalerPodDao { final SearchBuilder podIdSearch; diff --git a/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/driver/S3ImageStoreDriverImpl.java b/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/driver/S3ImageStoreDriverImpl.java index 7ca482422e3..f31aea3bcb1 100644 --- a/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/driver/S3ImageStoreDriverImpl.java +++ b/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/driver/S3ImageStoreDriverImpl.java @@ -66,7 +66,10 @@ public class S3ImageStoreDriverImpl extends BaseImageStoreDriverImpl { details.get(ApiConstants.S3_SOCKET_TIMEOUT) == null ? null : Integer.valueOf(details .get(ApiConstants.S3_SOCKET_TIMEOUT)), imgStore.getCreated(), _configDao.getValue(Config.S3EnableRRS.toString()) == null ? false : Boolean.parseBoolean(_configDao - .getValue(Config.S3EnableRRS.toString()))); + .getValue(Config.S3EnableRRS.toString())), + _configDao.getValue(Config.S3EnableMultiPartUpload.toString()) == null ? true : Boolean.parseBoolean(_configDao + .getValue(Config.S3EnableMultiPartUpload.toString())) + ); } diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java index c73e409af6b..8046b6cfd1b 100644 --- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java +++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java @@ -277,8 +277,10 @@ public class SolidfirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { iops = new Iops(volumeInfo.getMinIops(), volumeInfo.getMaxIops(), getDefaultBurstIops(storagePoolId, volumeInfo.getMaxIops())); } + long volumeSize = volumeInfo.getSize() * 2; // in reality, use a multiplier that's at cluster-level scope + long sfVolumeId = SolidFireUtil.createSolidFireVolume(mVip, mPort, clusterAdminUsername, clusterAdminPassword, - getSolidFireVolumeName(volumeInfo.getName()), sfAccountId, volumeInfo.getSize(), true, + getSolidFireVolumeName(volumeInfo.getName()), sfAccountId, volumeSize, true, volumeInfo.getSize().toString(), iops.getMinIops(), iops.getMaxIops(), iops.getBurstIops()); return SolidFireUtil.getSolidFireVolume(mVip, mPort, clusterAdminUsername, clusterAdminPassword, sfVolumeId); diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java index ac11272a0c1..6659f98f15f 100644 --- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java +++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java @@ -78,13 +78,13 @@ public class SolidFireUtil public static final String USE_MUTUAL_CHAP_FOR_VMWARE = "useMutualChapForVMware"; public static long createSolidFireVolume(String strSfMvip, int iSfPort, String strSfAdmin, String strSfPassword, - String strSfVolumeName, long lSfAccountId, long lTotalSize, boolean 
bEnable512e, + String strSfVolumeName, long lSfAccountId, long lTotalSize, boolean bEnable512e, final String strCloudStackVolumeSize, long lMinIops, long lMaxIops, long lBurstIops) { final Gson gson = new GsonBuilder().create(); VolumeToCreate volumeToCreate = new VolumeToCreate(strSfVolumeName, lSfAccountId, lTotalSize, bEnable512e, - lMinIops, lMaxIops, lBurstIops); + strCloudStackVolumeSize, lMinIops, lMaxIops, lBurstIops); String strVolumeToCreateJson = gson.toJson(volumeToCreate); @@ -443,10 +443,10 @@ public class SolidFireUtil private final VolumeToCreateParams params; private VolumeToCreate(final String strVolumeName, final long lAccountId, final long lTotalSize, - final boolean bEnable512e, final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS) + final boolean bEnable512e, final String strCloudStackVolumeSize, final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS) { params = new VolumeToCreateParams(strVolumeName, lAccountId, lTotalSize, bEnable512e, - lMinIOPS, lMaxIOPS, lBurstIOPS); + strCloudStackVolumeSize, lMinIOPS, lMaxIOPS, lBurstIOPS); } private static final class VolumeToCreateParams @@ -456,18 +456,30 @@ public class SolidFireUtil private final long totalSize; private final boolean enable512e; private final VolumeToCreateParamsQoS qos; + private final VolumeToCreateParamsAttributes attributes; private VolumeToCreateParams(final String strVolumeName, final long lAccountId, final long lTotalSize, - final boolean bEnable512e, final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS) + final boolean bEnable512e, final String strCloudStackVolumeSize, final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS) { name = strVolumeName; accountID = lAccountId; totalSize = lTotalSize; enable512e = bEnable512e; + attributes = new VolumeToCreateParamsAttributes(strCloudStackVolumeSize); qos = new VolumeToCreateParamsQoS(lMinIOPS, lMaxIOPS, lBurstIOPS); } + private static final class VolumeToCreateParamsAttributes + { + private final String CloudStackVolumeSize; + + private VolumeToCreateParamsAttributes(final String strCloudStackVolumeSize) + { + CloudStackVolumeSize = strCloudStackVolumeSize; + } + } + private static final class VolumeToCreateParamsQoS { private final long minIOPS; diff --git a/server/src/com/cloud/api/query/QueryManagerImpl.java b/server/src/com/cloud/api/query/QueryManagerImpl.java index 5f3cceba95d..bd4f1588ece 100644 --- a/server/src/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/com/cloud/api/query/QueryManagerImpl.java @@ -768,6 +768,7 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { ListProjectResourcesCriteria listProjectResourcesCriteria, Map tags) { Filter searchFilter = new Filter(UserVmJoinVO.class, c.getOrderBy(), c.getAscending(), c.getOffset(), c.getLimit()); + boolean isRootAdmin = _accountMgr.isRootAdmin(caller.getType()); // first search distinct vm id by using query criteria and pagination SearchBuilder sb = _userVmJoinDao.createSearchBuilder(); @@ -831,6 +832,10 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { sb.and("affinityGroupId", sb.entity().getAffinityGroupId(), SearchCriteria.Op.EQ); } + if(!isRootAdmin){ + sb.and("displayVm", sb.entity().isDisplayVm(), SearchCriteria.Op.EQ); + } + // populate the search criteria with the values passed in SearchCriteria sc = sb.create(); @@ -936,6 +941,9 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { sc.setParameters("affinityGroupId", affinityGroupId); } + 
if(!isRootAdmin){ + sc.setParameters("displayVm", 1); + } // search vm details by ids Pair, Integer> uniqueVmPair = _userVmJoinDao.searchAndCount(sc, searchFilter); Integer count = uniqueVmPair.second(); @@ -1622,6 +1630,7 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { String keyword = cmd.getKeyword(); String type = cmd.getType(); Map tags = cmd.getTags(); + boolean isRootAdmin = _accountMgr.isRootAdmin(caller.getType()); Long zoneId = cmd.getZoneId(); Long podId = null; @@ -1663,6 +1672,9 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { // display UserVM volumes only sb.and().op("type", sb.entity().getVmType(), SearchCriteria.Op.NIN); sb.or("nulltype", sb.entity().getVmType(), SearchCriteria.Op.NULL); + if(!isRootAdmin){ + sb.and("displayVolume", sb.entity().isDisplayVolume(), SearchCriteria.Op.EQ); + } sb.cp(); @@ -1713,6 +1725,10 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { sc.setParameters("podId", podId); } + if(!isRootAdmin){ + sc.setParameters("displayVolume", 1); + } + // Don't return DomR and ConsoleProxy volumes sc.setParameters("type", VirtualMachine.Type.ConsoleProxy, VirtualMachine.Type.SecondaryStorageVm, VirtualMachine.Type.DomainRouter); @@ -2236,6 +2252,7 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { Object id = cmd.getId(); Object keyword = cmd.getKeyword(); Long domainId = cmd.getDomainId(); + Boolean isRootAdmin = _accountMgr.isRootAdmin(account.getType()); // Keeping this logic consistent with domain specific zones // if a domainId is provided, we just return the disk offering // associated with this domain @@ -2244,6 +2261,9 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { // check if the user's domain == do's domain || user's domain is // a child of so's domain for non-root users sc.addAnd("domainId", SearchCriteria.Op.EQ, domainId); + if(!isRootAdmin){ + sc.addAnd("displayOffering", SearchCriteria.Op.EQ, 1); + } return _diskOfferingJoinDao.searchAndCount(sc, searchFilter); } else { throw new PermissionDeniedException("The account:" + account.getAccountName() @@ -2276,6 +2296,7 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { spc.addOr("domainId", SearchCriteria.Op.NULL); // include public // offering as where sc.addAnd("domainId", SearchCriteria.Op.SC, spc); + sc.addAnd("displayOffering", SearchCriteria.Op.EQ, 1); sc.addAnd("systemUse", SearchCriteria.Op.EQ, false); // non-root // users should // not see diff --git a/server/src/com/cloud/api/query/dao/VolumeJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/VolumeJoinDaoImpl.java index df6e583058c..b76ff34a489 100644 --- a/server/src/com/cloud/api/query/dao/VolumeJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/VolumeJoinDaoImpl.java @@ -142,6 +142,10 @@ public class VolumeJoinDaoImpl extends GenericDaoBase implem } } + if (caller.getType() == Account.ACCOUNT_TYPE_ADMIN){ + volResponse.setPath(volume.getPath()); + } + // populate owner. 
ApiResponseHelper.populateOwner(volResponse, volume); diff --git a/server/src/com/cloud/api/query/vo/VolumeJoinVO.java b/server/src/com/cloud/api/query/vo/VolumeJoinVO.java index c7b43ba1569..9fe9fd1fe5b 100644 --- a/server/src/com/cloud/api/query/vo/VolumeJoinVO.java +++ b/server/src/com/cloud/api/query/vo/VolumeJoinVO.java @@ -260,10 +260,12 @@ public class VolumeJoinVO extends BaseViewVO implements ControlledViewEntity { @Column(name="tag_customer") private String tagCustomer; - @Column(name="display_volume", updatable=true, nullable=false) protected boolean displayVolume; + @Column(name="path") + protected String path; + public VolumeJoinVO() { } @@ -1091,6 +1093,13 @@ public class VolumeJoinVO extends BaseViewVO implements ControlledViewEntity { } + public String getPath() { + return path; + } + + public void setPath(String path) { + this.path = path; + } } diff --git a/server/src/com/cloud/configuration/Config.java b/server/src/com/cloud/configuration/Config.java index 8ca595b14d0..1377bf72161 100755 --- a/server/src/com/cloud/configuration/Config.java +++ b/server/src/com/cloud/configuration/Config.java @@ -376,6 +376,7 @@ public enum Config { // object store S3EnableRRS("Advanced", ManagementServer.class, Boolean.class, "s3.rrs.enabled", "false", "enable s3 reduced redundancy storage", null), + S3EnableMultiPartUpload("Advanced", ManagementServer.class, Boolean.class, "s3.multipart.enabled", "true", "enable s3 multipart upload", null), // Ldap LdapBasedn("Advanced", ManagementServer.class, String.class, "ldap.basedn", null, "Sets the basedn for LDAP", null), diff --git a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java index 2b86bf51238..4fda3b152ee 100755 --- a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java @@ -2310,6 +2310,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati String name = cmd.getDiskOfferingName(); String displayText = cmd.getDisplayText(); Integer sortKey = cmd.getSortKey(); + Boolean displayDiskOffering = cmd.getDisplayOffering(); // Check if diskOffering exists DiskOffering diskOfferingHandle = _entityMgr.findById(DiskOffering.class, diskOfferingId); @@ -2318,7 +2319,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati throw new InvalidParameterValueException("Unable to find disk offering by id " + diskOfferingId); } - boolean updateNeeded = (name != null || displayText != null || sortKey != null); + boolean updateNeeded = (name != null || displayText != null || sortKey != null || displayDiskOffering != null); if (!updateNeeded) { return _diskOfferingDao.findById(diskOfferingId); } @@ -2337,6 +2338,10 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati diskOffering.setSortKey(sortKey); } + if(displayDiskOffering != null){ + diskOffering.setDisplayOffering(displayDiskOffering); + } + // Note: tag editing commented out for now;keeping the code intact, // might need to re-enable in next releases // if (tags != null) diff --git a/server/src/com/cloud/dc/dao/DedicatedResourceDaoImpl.java b/server/src/com/cloud/dc/dao/DedicatedResourceDaoImpl.java index bc58021a4d4..1936b85015e 100644 --- a/server/src/com/cloud/dc/dao/DedicatedResourceDaoImpl.java +++ b/server/src/com/cloud/dc/dao/DedicatedResourceDaoImpl.java @@ -23,7 +23,6 @@ import javax.ejb.Local; import org.springframework.stereotype.Component; 
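Taken together, the query- and response-layer changes above follow one pattern: when the caller is not a root admin, the listing code adds a condition on the resource's display flag to the SearchBuilder and then binds it to 1 (true) on the resulting SearchCriteria, so only resources marked displayable are returned, while root admins keep full visibility. A minimal sketch of that pattern, reusing the SearchBuilder/SearchCriteria calls already shown in the hunks above but with placeholder names (WidgetJoinVO, displayWidget, _widgetJoinDao) that do not exist in the code base:

    // Sketch only: the non-root visibility filter applied above to VMs, volumes and disk offerings.
    boolean isRootAdmin = _accountMgr.isRootAdmin(caller.getType());

    SearchBuilder<WidgetJoinVO> sb = _widgetJoinDao.createSearchBuilder();
    if (!isRootAdmin) {
        // build the condition only for non-root callers, so admins are never filtered
        sb.and("displayWidget", sb.entity().isDisplayWidget(), SearchCriteria.Op.EQ);
    }

    SearchCriteria<WidgetJoinVO> sc = sb.create();
    if (!isRootAdmin) {
        // bind the flag to "displayable" (stored as 1/true in the join view)
        sc.setParameters("displayWidget", 1);
    }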
import com.cloud.dc.DedicatedResourceVO; -import com.cloud.dc.HostPodVO; import com.cloud.utils.Pair; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; @@ -35,7 +34,7 @@ import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.Transaction; @Component -@Local(value={DedicatedResourceDao.class}) @DB(txn = false) +@Local(value={DedicatedResourceDao.class}) @DB public class DedicatedResourceDaoImpl extends GenericDaoBase implements DedicatedResourceDao { protected final SearchBuilder ZoneSearch; protected final SearchBuilder PodSearch; diff --git a/server/src/com/cloud/hypervisor/HypervisorGuruManagerImpl.java b/server/src/com/cloud/hypervisor/HypervisorGuruManagerImpl.java index d0effaba607..ace7c9127b5 100644 --- a/server/src/com/cloud/hypervisor/HypervisorGuruManagerImpl.java +++ b/server/src/com/cloud/hypervisor/HypervisorGuruManagerImpl.java @@ -57,6 +57,10 @@ public class HypervisorGuruManagerImpl extends ManagerBase implements Hypervisor @Override public HypervisorGuru getGuru(HypervisorType hypervisorType) { + if (hypervisorType == null) { + return null; + } + HypervisorGuru result = _hvGurus.get(hypervisorType); if ( result == null ) { diff --git a/server/src/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/com/cloud/storage/VolumeApiServiceImpl.java index faff10f11fd..079f90c2bc9 100644 --- a/server/src/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/com/cloud/storage/VolumeApiServiceImpl.java @@ -1047,8 +1047,17 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic HypervisorType rootDiskHyperType = vm.getHypervisorType(); HypervisorType dataDiskHyperType = _volsDao.getHypervisorType(volume.getId()); - if (dataDiskHyperType != HypervisorType.None && rootDiskHyperType != dataDiskHyperType) { - throw new InvalidParameterValueException("Can't attach a volume created by: " + dataDiskHyperType + " to a " + rootDiskHyperType + " vm"); + + VolumeVO dataDiskVol = _volsDao.findById(volume.getId()); + StoragePoolVO dataDiskStoragePool = _storagePoolDao.findById(dataDiskVol.getPoolId()); + + // managed storage can be used for different types of hypervisors + // only perform this check if the volume's storage pool is not null and not managed + if (dataDiskStoragePool != null && !dataDiskStoragePool.isManaged()) { + if (dataDiskHyperType != HypervisorType.None && rootDiskHyperType != dataDiskHyperType) { + throw new InvalidParameterValueException("Can't attach a volume created by: " + dataDiskHyperType + + " to a " + rootDiskHyperType + " vm"); + } } deviceId = getDeviceId(vmId, deviceId); @@ -1106,12 +1115,16 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic @Override @ActionEvent(eventType = EventTypes.EVENT_VOLUME_UPDATE, eventDescription = "updating volume", async = true) - public Volume updateVolume(long volumeId, String path, String state, Long storageId) { + public Volume updateVolume(long volumeId, String path, String state, Long storageId, Boolean displayVolume) { VolumeVO volume = _volumeDao.findById(volumeId); if (path != null) { volume.setPath(path); } + + if (displayVolume != null) { + volume.setDisplayVolume(displayVolume); + } if (state != null) { try { diff --git a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java index 2297e6adde3..0b53cfda11f 100755 --- a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java +++ 
b/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java @@ -48,7 +48,6 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; - import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -69,13 +68,11 @@ import com.cloud.event.ActionEventUtils; import com.cloud.event.EventTypes; import com.cloud.event.EventVO; import com.cloud.event.UsageEventUtils; -import com.cloud.event.dao.EventDao; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.PermissionDeniedException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.StorageUnavailableException; import com.cloud.host.HostVO; -import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.projects.Project.ListProjectResourcesCriteria; import com.cloud.resource.ResourceManager; @@ -193,10 +190,10 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, @Inject VolumeDataFactory volFactory; @Inject SnapshotDataFactory snapshotFactory; @Inject EndPointSelector _epSelector; - @Inject - private ResourceManager _resourceMgr; - @Inject - protected List snapshotStrategies; + @Inject + private ResourceManager _resourceMgr; + @Inject + protected List snapshotStrategies; private int _totalRetries; @@ -260,17 +257,39 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, return null; } + @Override + public boolean revertSnapshot(Long snapshotId) { + Snapshot snapshot = _snapshotDao.findById(snapshotId); + if (snapshot == null) { + throw new InvalidParameterValueException("No such snapshot"); + } + + SnapshotStrategy snapshotStrategy = null; + for (SnapshotStrategy strategy : snapshotStrategies) { + if (strategy.canHandle(snapshot)) { + snapshotStrategy = strategy; + break; + } + } + + if (snapshotStrategy == null) { + return false; + } + + return snapshotStrategy.revertSnapshot(snapshotId); + } + @Override @DB @ActionEvent(eventType = EventTypes.EVENT_SNAPSHOT_CREATE, eventDescription = "creating snapshot", async = true) public Snapshot createSnapshot(Long volumeId, Long policyId, Long snapshotId, Account snapshotOwner) { VolumeInfo volume = volFactory.getVolume(volumeId); if (volume == null) { - throw new InvalidParameterValueException("No such volume exist"); + throw new InvalidParameterValueException("No such volume exist"); } if (volume.getState() != Volume.State.Ready) { - throw new InvalidParameterValueException("Volume is not in ready state"); + throw new InvalidParameterValueException("Volume is not in ready state"); } @@ -281,16 +300,16 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, SnapshotInfo snapshot = snapshotFactory.getSnapshot(snapshotId, DataStoreRole.Primary); try { - postCreateSnapshot(volumeId, snapshot.getId(), policyId); - //Check if the snapshot was removed while backingUp. 
If yes, do not log snapshot create usage event - SnapshotVO freshSnapshot = _snapshotDao.findById(snapshot.getId()); - if ((freshSnapshot != null) && backedUp) { - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_SNAPSHOT_CREATE, snapshot.getAccountId(), - snapshot.getDataCenterId(), snapshotId, snapshot.getName(), null, null, - volume.getSize(), snapshot.getClass().getName(), snapshot.getUuid()); - } + postCreateSnapshot(volumeId, snapshot.getId(), policyId); + //Check if the snapshot was removed while backingUp. If yes, do not log snapshot create usage event + SnapshotVO freshSnapshot = _snapshotDao.findById(snapshot.getId()); + if ((freshSnapshot != null) && backedUp) { + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_SNAPSHOT_CREATE, snapshot.getAccountId(), + snapshot.getDataCenterId(), snapshotId, snapshot.getName(), null, null, + volume.getSize(), snapshot.getClass().getName(), snapshot.getUuid()); + } - _resourceLimitMgr.incrementResourceCount(snapshotOwner.getId(), ResourceType.snapshot); + _resourceLimitMgr.incrementResourceCount(snapshotOwner.getId(), ResourceType.snapshot); } catch(Exception e) { s_logger.debug("Failed to create snapshot", e); @@ -311,12 +330,12 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, @Override public Snapshot backupSnapshot(Long snapshotId) { - SnapshotInfo snapshot = snapshotFactory.getSnapshot(snapshotId, DataStoreRole.Image); - if (snapshot != null) { - throw new CloudRuntimeException("Already in the backup snapshot:" + snapshotId); - } + SnapshotInfo snapshot = snapshotFactory.getSnapshot(snapshotId, DataStoreRole.Image); + if (snapshot != null) { + throw new CloudRuntimeException("Already in the backup snapshot:" + snapshotId); + } - return snapshotSrv.backupSnapshot(snapshot); + return snapshotSrv.backupSnapshot(snapshot); } /* @@ -412,14 +431,14 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, @Override public SnapshotVO getParentSnapshot(VolumeInfo volume) { - long preId = _snapshotDao.getLastSnapshot(volume.getId(), DataStoreRole.Primary); + long preId = _snapshotDao.getLastSnapshot(volume.getId(), DataStoreRole.Primary); - SnapshotVO preSnapshotVO = null; - if (preId != 0 && !(volume.getLastPoolId() != null && !volume.getLastPoolId().equals(volume.getPoolId()))) { - preSnapshotVO = _snapshotDao.findByIdIncludingRemoved(preId); - } + SnapshotVO preSnapshotVO = null; + if (preId != 0 && !(volume.getLastPoolId() != null && !volume.getLastPoolId().equals(volume.getPoolId()))) { + preSnapshotVO = _snapshotDao.findByIdIncludingRemoved(preId); + } - return preSnapshotVO; + return preSnapshotVO; } private Long getSnapshotUserId() { @@ -463,7 +482,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, s_logger.debug("Max snaps: " + policy.getMaxSnaps() + " exceeded for snapshot policy with Id: " + policyId + ". 
Deleting oldest snapshot: " + oldSnapId); } if(deleteSnapshot(oldSnapId)){ - //log Snapshot delete event + //log Snapshot delete event ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, oldestSnapshot.getAccountId(), EventVO.LEVEL_INFO, EventTypes.EVENT_SNAPSHOT_DELETE, "Successfully deleted oldest snapshot: " + oldSnapId, 0); } snaps.remove(oldestSnapshot); @@ -485,27 +504,27 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, _accountMgr.checkAccess(caller, null, true, snapshotCheck); SnapshotStrategy snapshotStrategy = null; for (SnapshotStrategy strategy : snapshotStrategies) { - if (strategy.canHandle(snapshotCheck)) { - snapshotStrategy = strategy; - break; - } + if (strategy.canHandle(snapshotCheck)) { + snapshotStrategy = strategy; + break; + } } try { - boolean result = snapshotStrategy.deleteSnapshot(snapshotId); - if (result) { + boolean result = snapshotStrategy.deleteSnapshot(snapshotId); + if (result) { if (snapshotCheck.getState() == Snapshot.State.BackedUp) { - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_SNAPSHOT_DELETE, snapshotCheck.getAccountId(), - snapshotCheck.getDataCenterId(), snapshotId, snapshotCheck.getName(), null, null, 0L, - snapshotCheck.getClass().getName(), snapshotCheck.getUuid()); - } + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_SNAPSHOT_DELETE, snapshotCheck.getAccountId(), + snapshotCheck.getDataCenterId(), snapshotId, snapshotCheck.getName(), null, null, 0L, + snapshotCheck.getClass().getName(), snapshotCheck.getUuid()); + } _resourceLimitMgr.decrementResourceCount(snapshotCheck.getAccountId(), ResourceType.snapshot); _resourceLimitMgr.decrementResourceCount(snapshotCheck.getAccountId(), ResourceType.secondary_storage, new Long(snapshotCheck.getSize())); - } - return result; + } + return result; } catch (Exception e) { - s_logger.debug("Failed to delete snapshot: " + snapshotCheck.getId() + ":" + e.toString()); - throw new CloudRuntimeException("Failed to delete snapshot:" + e.toString()); + s_logger.debug("Failed to delete snapshot: " + snapshotCheck.getId() + ":" + e.toString()); + throw new CloudRuntimeException("Failed to delete snapshot:" + e.toString()); } } @@ -544,10 +563,10 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, } Ternary domainIdRecursiveListProject = new Ternary(cmd.getDomainId(), cmd.isRecursive(), null); - _accountMgr.buildACLSearchParameters(caller, id, cmd.getAccountName(), cmd.getProjectId(), permittedAccounts, domainIdRecursiveListProject, cmd.listAll(), false); - Long domainId = domainIdRecursiveListProject.first(); - Boolean isRecursive = domainIdRecursiveListProject.second(); - ListProjectResourcesCriteria listProjectResourcesCriteria = domainIdRecursiveListProject.third(); + _accountMgr.buildACLSearchParameters(caller, id, cmd.getAccountName(), cmd.getProjectId(), permittedAccounts, domainIdRecursiveListProject, cmd.listAll(), false); + Long domainId = domainIdRecursiveListProject.first(); + Boolean isRecursive = domainIdRecursiveListProject.second(); + ListProjectResourcesCriteria listProjectResourcesCriteria = domainIdRecursiveListProject.third(); Filter searchFilter = new Filter(SnapshotVO.class, "created", false, cmd.getStartIndex(), cmd.getPageSizeVal()); SearchBuilder sb = _snapshotDao.createSearchBuilder(); @@ -560,7 +579,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, sb.and("snapshotTypeEQ", sb.entity().getsnapshotType(), SearchCriteria.Op.IN); sb.and("snapshotTypeNEQ", 
sb.entity().getsnapshotType(), SearchCriteria.Op.NEQ); sb.and("dataCenterId", sb.entity().getDataCenterId(), SearchCriteria.Op.EQ); - + if (tags != null && !tags.isEmpty()) { SearchBuilder tagSearch = _resourceTagDao.createSearchBuilder(); for (int count=0; count < tags.size(); count++) { @@ -595,7 +614,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, if (zoneId != null) { sc.setParameters("dataCenterId", zoneId); } - + if (name != null) { sc.setParameters("name", "%" + name + "%"); } @@ -763,10 +782,10 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, long domainLimit = _resourceLimitMgr.findCorrectResourceLimitForDomain(_domainMgr.getDomain(owner.getDomainId()), ResourceType.snapshot); int max = cmd.getMaxSnaps().intValue(); if (owner.getType() != Account.ACCOUNT_TYPE_ADMIN && ((accountLimit != -1 && max > accountLimit) || (domainLimit != -1 && max > domainLimit))) { - String message = "domain/account"; - if (owner.getType() == Account.ACCOUNT_TYPE_PROJECT) { - message = "domain/project"; - } + String message = "domain/account"; + if (owner.getType() == Account.ACCOUNT_TYPE_PROJECT) { + message = "domain/project"; + } throw new InvalidParameterValueException("Max number of snapshots shouldn't exceed the " + message + " level snapshot limit"); } @@ -905,37 +924,37 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, private boolean hostSupportSnapsthotForVolume(HostVO host, VolumeInfo volume) { - if (host.getHypervisorType() != HypervisorType.KVM) { - return true; - } + if (host.getHypervisorType() != HypervisorType.KVM) { + return true; + } //Turn off snapshot by default for KVM if the volume attached to vm that is not in the Stopped/Destroyed state, - //unless it is set in the global flag - Long vmId = volume.getInstanceId(); - if (vmId != null) { - VMInstanceVO vm = _vmDao.findById(vmId); - if (vm.getState() != VirtualMachine.State.Stopped && vm.getState() != VirtualMachine.State.Destroyed) { - boolean snapshotEnabled = Boolean.parseBoolean(_configDao.getValue("kvm.snapshot.enabled")); - if (!snapshotEnabled) { - s_logger.debug("Snapshot is not supported on host " + host + " for the volume " + volume + " attached to the vm " + vm); - return false; - } - } - } - - // Determine host capabilities - String caps = host.getCapabilities(); + //unless it is set in the global flag + Long vmId = volume.getInstanceId(); + if (vmId != null) { + VMInstanceVO vm = _vmDao.findById(vmId); + if (vm.getState() != VirtualMachine.State.Stopped && vm.getState() != VirtualMachine.State.Destroyed) { + boolean snapshotEnabled = Boolean.parseBoolean(_configDao.getValue("kvm.snapshot.enabled")); + if (!snapshotEnabled) { + s_logger.debug("Snapshot is not supported on host " + host + " for the volume " + volume + " attached to the vm " + vm); + return false; + } + } + } - if (caps != null) { - String[] tokens = caps.split(","); - for (String token : tokens) { - if (token.contains("snapshot")) { - return true; - } - } - } - return false; - } + // Determine host capabilities + String caps = host.getCapabilities(); + + if (caps != null) { + String[] tokens = caps.split(","); + for (String token : tokens) { + if (token.contains("snapshot")) { + return true; + } + } + } + return false; + } private boolean supportedByHypervisor(VolumeInfo volume) { HypervisorType hypervisorType; @@ -967,10 +986,10 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, } } - // if volume is attached to a vm 
in destroyed or expunging state; disallow - if (volume.getInstanceId() != null) { - UserVmVO userVm = _vmDao.findById(volume.getInstanceId()); - if (userVm != null) { + // if volume is attached to a vm in destroyed or expunging state; disallow + if (volume.getInstanceId() != null) { + UserVmVO userVm = _vmDao.findById(volume.getInstanceId()); + if (userVm != null) { if (userVm.getState().equals(State.Destroyed) || userVm.getState().equals(State.Expunging)) { throw new CloudRuntimeException("Creating snapshot failed due to volume:" + volume.getId() + " is associated with vm:" + userVm.getInstanceName() + " is in " @@ -993,11 +1012,11 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, throw new CloudRuntimeException( "There is other active vm snapshot tasks on the instance to which the volume is attached, please try again later"); } - } - } + } + } - return true; - } + return true; + } @Override @DB public SnapshotInfo takeSnapshot(VolumeInfo volume) throws ResourceAllocationException { @@ -1124,10 +1143,10 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, public boolean canOperateOnVolume(Volume volume) { List snapshots = _snapshotDao.listByStatus(volume.getId(), Snapshot.State.Creating, Snapshot.State.CreatedOnPrimary, Snapshot.State.BackingUp); - if (snapshots.size() > 0) { - return false; - } - return true; + if (snapshots.size() > 0) { + return false; + } + return true; } @Override diff --git a/server/test/com/cloud/ha/KVMFencerTest.java b/server/test/com/cloud/ha/KVMFencerTest.java new file mode 100644 index 00000000000..d34ef018bef --- /dev/null +++ b/server/test/com/cloud/ha/KVMFencerTest.java @@ -0,0 +1,193 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package com.cloud.ha; + +import java.util.Arrays; +import java.util.Collections; + +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.runners.MockitoJUnitRunner; + +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.FenceAnswer; +import com.cloud.agent.api.FenceCommand; +import com.cloud.exception.AgentUnavailableException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.resource.ResourceManager; +import com.cloud.vm.VirtualMachine; + +@RunWith(MockitoJUnitRunner.class) +public class KVMFencerTest { + + @Mock + HostDao hostDao; + @Mock + AgentManager agentManager; + @Mock + ResourceManager resourceManager; + + KVMFencer fencer; + + @Before + public void setup() { + fencer = new KVMFencer(); + fencer._agentMgr = agentManager; + fencer._hostDao = hostDao; + fencer._resourceMgr = resourceManager; + } + + @Test + public void testWithSingleHost() { + HostVO host = Mockito.mock(HostVO.class); + Mockito.when(host.getClusterId()).thenReturn(1l); + Mockito.when(host.getHypervisorType()).thenReturn(HypervisorType.KVM); + Mockito.when(host.getStatus()).thenReturn(Status.Up); + Mockito.when(host.getId()).thenReturn(1l); + VirtualMachine virtualMachine = Mockito.mock(VirtualMachine.class); + + Mockito.when(resourceManager.listAllHostsInCluster(1l)).thenReturn( + Collections.singletonList(host)); + Assert.assertFalse(fencer.fenceOff(virtualMachine, host)); + } + + @Test + public void testWithSingleHostDown() { + HostVO host = Mockito.mock(HostVO.class); + Mockito.when(host.getClusterId()).thenReturn(1l); + Mockito.when(host.getHypervisorType()).thenReturn(HypervisorType.KVM); + Mockito.when(host.getStatus()).thenReturn(Status.Down); + Mockito.when(host.getId()).thenReturn(1l); + VirtualMachine virtualMachine = Mockito.mock(VirtualMachine.class); + + Mockito.when(resourceManager.listAllHostsInCluster(1l)).thenReturn( + Collections.singletonList(host)); + Assert.assertFalse(fencer.fenceOff(virtualMachine, host)); + } + + @Test + public void testWithHosts() throws AgentUnavailableException, + OperationTimedoutException { + HostVO host = Mockito.mock(HostVO.class); + Mockito.when(host.getClusterId()).thenReturn(1l); + Mockito.when(host.getHypervisorType()).thenReturn(HypervisorType.KVM); + Mockito.when(host.getStatus()).thenReturn(Status.Up); + Mockito.when(host.getId()).thenReturn(1l); + + HostVO secondHost = Mockito.mock(HostVO.class); + Mockito.when(secondHost.getClusterId()).thenReturn(1l); + Mockito.when(secondHost.getHypervisorType()).thenReturn( + HypervisorType.KVM); + Mockito.when(secondHost.getStatus()).thenReturn(Status.Up); + Mockito.when(secondHost.getId()).thenReturn(2l); + + VirtualMachine virtualMachine = Mockito.mock(VirtualMachine.class); + + Mockito.when(resourceManager.listAllHostsInCluster(1l)).thenReturn( + Arrays.asList(host, secondHost)); + + FenceAnswer answer = new FenceAnswer(null, true, "ok"); + Mockito.when( + agentManager.send(Mockito.anyLong(), + Mockito.any(FenceCommand.class))).thenReturn(answer); + + Assert.assertTrue(fencer.fenceOff(virtualMachine, host)); + } + + @Test + public void testWithFailingFence() throws AgentUnavailableException, + OperationTimedoutException { + HostVO host = Mockito.mock(HostVO.class); +
Mockito.when(host.getClusterId()).thenReturn(1l); + Mockito.when(host.getHypervisorType()).thenReturn(HypervisorType.KVM); + Mockito.when(host.getStatus()).thenReturn(Status.Up); + Mockito.when(host.getId()).thenReturn(1l); + + HostVO secondHost = Mockito.mock(HostVO.class); + Mockito.when(secondHost.getClusterId()).thenReturn(1l); + Mockito.when(secondHost.getHypervisorType()).thenReturn( + HypervisorType.KVM); + Mockito.when(secondHost.getStatus()).thenReturn(Status.Up); + Mockito.when(secondHost.getId()).thenReturn(2l); + + VirtualMachine virtualMachine = Mockito.mock(VirtualMachine.class); + + Mockito.when(resourceManager.listAllHostsInCluster(1l)).thenReturn( + Arrays.asList(host, secondHost)); + + Mockito.when( + agentManager.send(Mockito.anyLong(), + Mockito.any(FenceCommand.class))).thenThrow( + new AgentUnavailableException(2l)); + + Assert.assertFalse(fencer.fenceOff(virtualMachine, host)); + } + + @Test + public void testWithTimeoutingFence() throws AgentUnavailableException, + OperationTimedoutException { + HostVO host = Mockito.mock(HostVO.class); + Mockito.when(host.getClusterId()).thenReturn(1l); + Mockito.when(host.getHypervisorType()).thenReturn(HypervisorType.KVM); + Mockito.when(host.getStatus()).thenReturn(Status.Up); + Mockito.when(host.getId()).thenReturn(1l); + + HostVO secondHost = Mockito.mock(HostVO.class); + Mockito.when(secondHost.getClusterId()).thenReturn(1l); + Mockito.when(secondHost.getHypervisorType()).thenReturn( + HypervisorType.KVM); + Mockito.when(secondHost.getStatus()).thenReturn(Status.Up); + Mockito.when(secondHost.getId()).thenReturn(2l); + + VirtualMachine virtualMachine = Mockito.mock(VirtualMachine.class); + + Mockito.when(resourceManager.listAllHostsInCluster(1l)).thenReturn( + Arrays.asList(host, secondHost)); + + Mockito.when( + agentManager.send(Mockito.anyLong(), + Mockito.any(FenceCommand.class))).thenThrow( + new OperationTimedoutException(null, 2l, 0l, 0, false)); + + Assert.assertFalse(fencer.fenceOff(virtualMachine, host)); + } + + @Test + public void testWithSingleNotKVM() { + HostVO host = Mockito.mock(HostVO.class); + Mockito.when(host.getClusterId()).thenReturn(1l); + Mockito.when(host.getHypervisorType()).thenReturn(HypervisorType.Any); + Mockito.when(host.getStatus()).thenReturn(Status.Down); + Mockito.when(host.getId()).thenReturn(1l); + VirtualMachine virtualMachine = Mockito.mock(VirtualMachine.class); + + Mockito.when(resourceManager.listAllHostsInCluster(1l)).thenReturn( + Collections.singletonList(host)); + Assert.assertNull(fencer.fenceOff(virtualMachine, host)); + } + +} diff --git a/server/test/com/cloud/vpc/dao/MockNetworkDaoImpl.java b/server/test/com/cloud/vpc/dao/MockNetworkDaoImpl.java index 145b6c5c833..1aef649f047 100644 --- a/server/test/com/cloud/vpc/dao/MockNetworkDaoImpl.java +++ b/server/test/com/cloud/vpc/dao/MockNetworkDaoImpl.java @@ -32,7 +32,7 @@ import java.util.List; import java.util.Map; @Local(value = NetworkDao.class) -@DB(txn = false) +@DB() public class MockNetworkDaoImpl extends GenericDaoBase implements NetworkDao{ /* (non-Javadoc) diff --git a/server/test/com/cloud/vpc/dao/MockNetworkOfferingDaoImpl.java b/server/test/com/cloud/vpc/dao/MockNetworkOfferingDaoImpl.java index a8208dd7d9c..f6a3b13fe90 100644 --- a/server/test/com/cloud/vpc/dao/MockNetworkOfferingDaoImpl.java +++ b/server/test/com/cloud/vpc/dao/MockNetworkOfferingDaoImpl.java @@ -33,10 +33,9 @@ import com.cloud.offerings.NetworkOfferingVO; import com.cloud.offerings.dao.NetworkOfferingDao; import com.cloud.offerings.dao.NetworkOfferingDaoImpl;
import com.cloud.utils.db.DB; -import com.cloud.utils.db.GenericDaoBase; @Local(value = NetworkOfferingDao.class) -@DB(txn = false) +@DB() public class MockNetworkOfferingDaoImpl extends NetworkOfferingDaoImpl implements NetworkOfferingDao{ private static final Logger s_logger = Logger.getLogger(MockNetworkOfferingDaoImpl.class); diff --git a/server/test/com/cloud/vpc/dao/MockNetworkOfferingServiceMapDaoImpl.java b/server/test/com/cloud/vpc/dao/MockNetworkOfferingServiceMapDaoImpl.java index d1e835471c8..a7f77bc0e48 100644 --- a/server/test/com/cloud/vpc/dao/MockNetworkOfferingServiceMapDaoImpl.java +++ b/server/test/com/cloud/vpc/dao/MockNetworkOfferingServiceMapDaoImpl.java @@ -16,19 +16,15 @@ // under the License. package com.cloud.vpc.dao; -import java.util.ArrayList; -import java.util.List; - import javax.ejb.Local; import com.cloud.network.Network.Service; import com.cloud.offerings.dao.NetworkOfferingServiceMapDao; import com.cloud.offerings.dao.NetworkOfferingServiceMapDaoImpl; import com.cloud.utils.db.DB; -import com.cloud.utils.db.SearchCriteria; @Local(value = NetworkOfferingServiceMapDao.class) -@DB(txn = false) +@DB() public class MockNetworkOfferingServiceMapDaoImpl extends NetworkOfferingServiceMapDaoImpl{ @Override diff --git a/server/test/com/cloud/vpc/dao/MockNetworkServiceMapDaoImpl.java b/server/test/com/cloud/vpc/dao/MockNetworkServiceMapDaoImpl.java index 103f04ea8b9..c5c0a063013 100644 --- a/server/test/com/cloud/vpc/dao/MockNetworkServiceMapDaoImpl.java +++ b/server/test/com/cloud/vpc/dao/MockNetworkServiceMapDaoImpl.java @@ -28,7 +28,7 @@ import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; @Local(value = NetworkServiceMapDao.class) -@DB(txn = false) +@DB() public class MockNetworkServiceMapDaoImpl extends GenericDaoBase implements NetworkServiceMapDao{ /* (non-Javadoc) diff --git a/server/test/com/cloud/vpc/dao/MockVpcDaoImpl.java b/server/test/com/cloud/vpc/dao/MockVpcDaoImpl.java index 5e1c2ecef56..e7674e09c3d 100644 --- a/server/test/com/cloud/vpc/dao/MockVpcDaoImpl.java +++ b/server/test/com/cloud/vpc/dao/MockVpcDaoImpl.java @@ -32,7 +32,7 @@ import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; @Local(value = VpcDao.class) -@DB(txn = false) +@DB() public class MockVpcDaoImpl extends GenericDaoBase implements VpcDao{ private static final Logger s_logger = Logger.getLogger(MockNetworkOfferingDaoImpl.class); diff --git a/server/test/com/cloud/vpc/dao/MockVpcOfferingDaoImpl.java b/server/test/com/cloud/vpc/dao/MockVpcOfferingDaoImpl.java index 329931e1dd4..48df3d466ef 100644 --- a/server/test/com/cloud/vpc/dao/MockVpcOfferingDaoImpl.java +++ b/server/test/com/cloud/vpc/dao/MockVpcOfferingDaoImpl.java @@ -24,7 +24,7 @@ import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; @Local(value = VpcOfferingDao.class) -@DB(txn = false) +@DB() public class MockVpcOfferingDaoImpl extends GenericDaoBase implements VpcOfferingDao{ /* (non-Javadoc) diff --git a/server/test/com/cloud/vpc/dao/MockVpcOfferingServiceMapDaoImpl.java b/server/test/com/cloud/vpc/dao/MockVpcOfferingServiceMapDaoImpl.java index 3357686af87..9618536aa75 100644 --- a/server/test/com/cloud/vpc/dao/MockVpcOfferingServiceMapDaoImpl.java +++ b/server/test/com/cloud/vpc/dao/MockVpcOfferingServiceMapDaoImpl.java @@ -27,7 +27,7 @@ import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; @Local(value = VpcOfferingServiceMapDao.class) -@DB(txn = false) +@DB() public class MockVpcOfferingServiceMapDaoImpl extends GenericDaoBase implements 
VpcOfferingServiceMapDao{ /* (non-Javadoc) diff --git a/services/secondary-storage/src/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java b/services/secondary-storage/src/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java index 3ef950b1a0b..85d25f9a860 100755 --- a/services/secondary-storage/src/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java +++ b/services/secondary-storage/src/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java @@ -16,6 +16,8 @@ // under the License. package org.apache.cloudstack.storage.resource; + +import static com.cloud.utils.S3Utils.mputFile; import static com.cloud.utils.S3Utils.putFile; import static com.cloud.utils.StringUtils.join; import static com.cloud.utils.db.GlobalLock.executeWithNoWaitLock; @@ -23,7 +25,15 @@ import static java.lang.String.format; import static java.util.Arrays.asList; import static org.apache.commons.lang.StringUtils.substringAfterLast; -import java.io.*; +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.FileReader; +import java.io.FileWriter; +import java.io.IOException; +import java.io.InputStream; import java.math.BigInteger; import java.net.InetAddress; import java.net.URI; @@ -39,10 +49,19 @@ import java.util.concurrent.Callable; import javax.naming.ConfigurationException; -import com.cloud.agent.api.storage.*; -import com.cloud.storage.VMTemplateStorageResourceAssoc; -import com.cloud.storage.template.*; -import com.cloud.utils.SwiftUtil; +import org.apache.commons.codec.digest.DigestUtils; +import org.apache.commons.lang.StringUtils; +import org.apache.http.HttpEntity; +import org.apache.http.HttpResponse; +import org.apache.http.NameValuePair; +import org.apache.http.client.HttpClient; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.utils.URLEncodedUtils; +import org.apache.http.impl.client.DefaultHttpClient; +import org.apache.log4j.Logger; + +import com.amazonaws.services.s3.model.S3ObjectSummary; + import org.apache.cloudstack.storage.command.CopyCmdAnswer; import org.apache.cloudstack.storage.command.CopyCommand; import org.apache.cloudstack.storage.command.DeleteCommand; @@ -56,18 +75,7 @@ import org.apache.cloudstack.storage.template.UploadManagerImpl; import org.apache.cloudstack.storage.to.SnapshotObjectTO; import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; -import org.apache.commons.codec.digest.DigestUtils; -import org.apache.commons.lang.StringUtils; -import org.apache.http.HttpEntity; -import org.apache.http.HttpResponse; -import org.apache.http.NameValuePair; -import org.apache.http.client.HttpClient; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.utils.URLEncodedUtils; -import org.apache.http.impl.client.DefaultHttpClient; -import org.apache.log4j.Logger; -import com.amazonaws.services.s3.model.S3ObjectSummary; import com.cloud.agent.api.Answer; import com.cloud.agent.api.CheckHealthAnswer; import com.cloud.agent.api.CheckHealthCommand; @@ -88,6 +96,14 @@ import com.cloud.agent.api.SecStorageSetupCommand.Certificates; import com.cloud.agent.api.SecStorageVMSetupCommand; import com.cloud.agent.api.StartupCommand; import com.cloud.agent.api.StartupSecondaryStorageCommand; +import com.cloud.agent.api.storage.CreateEntityDownloadURLCommand; +import 
com.cloud.agent.api.storage.DeleteEntityDownloadURLCommand; +import com.cloud.agent.api.storage.DownloadAnswer; +import com.cloud.agent.api.storage.ListTemplateAnswer; +import com.cloud.agent.api.storage.ListTemplateCommand; +import com.cloud.agent.api.storage.ListVolumeAnswer; +import com.cloud.agent.api.storage.ListVolumeCommand; +import com.cloud.agent.api.storage.UploadCommand; import com.cloud.agent.api.to.DataObjectType; import com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.DataTO; @@ -102,10 +118,19 @@ import com.cloud.resource.ServerResourceBase; import com.cloud.storage.DataStoreRole; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.StorageLayer; +import com.cloud.storage.VMTemplateStorageResourceAssoc; +import com.cloud.storage.template.Processor; import com.cloud.storage.template.Processor.FormatInfo; +import com.cloud.storage.template.QCOW2Processor; +import com.cloud.storage.template.RawImageProcessor; +import com.cloud.storage.template.TemplateLocation; +import com.cloud.storage.template.TemplateProp; +import com.cloud.storage.template.VhdProcessor; +import com.cloud.storage.template.VmdkProcessor; import com.cloud.utils.NumbersUtil; import com.cloud.utils.S3Utils; import com.cloud.utils.S3Utils.FileNamingStrategy; +import com.cloud.utils.SwiftUtil; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; import com.cloud.utils.script.OutputInterpreter; @@ -172,7 +197,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S } public void setInSystemVM(boolean inSystemVM) { - this._inSystemVM = inSystemVM; + _inSystemVM = inSystemVM; } @Override @@ -272,7 +297,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S String finalFileName = templateFilename; String finalDownloadPath = destPath + File.separator + templateFilename; // compute the size of - long size = this._storage.getSize(downloadPath + File.separator + templateFilename); + long size = _storage.getSize(downloadPath + File.separator + templateFilename); DataTO newDestTO = null; @@ -349,7 +374,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S protected Answer copySnapshotToTemplateFromNfsToNfsXenserver(CopyCommand cmd, SnapshotObjectTO srcData, NfsTO srcDataStore, TemplateObjectTO destData, NfsTO destDataStore) { - String srcMountPoint = this.getRootDir(srcDataStore.getUrl()); + String srcMountPoint = getRootDir(srcDataStore.getUrl()); String snapshotPath = srcData.getPath(); int index = snapshotPath.lastIndexOf("/"); String snapshotName = snapshotPath.substring(index + 1); @@ -357,17 +382,18 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S snapshotName = snapshotName + ".vhd"; } snapshotPath = snapshotPath.substring(0, index); + snapshotPath = srcMountPoint + File.separator + snapshotPath; - String destMountPoint = this.getRootDir(destDataStore.getUrl()); + String destMountPoint = getRootDir(destDataStore.getUrl()); String destPath = destMountPoint + File.separator + destData.getPath(); String errMsg = null; try { - this._storage.mkdir(destPath); + _storage.mkdir(destPath); String templateUuid = UUID.randomUUID().toString(); String templateName = templateUuid + ".vhd"; - Script command = new Script(this.createTemplateFromSnapshotXenScript, cmd.getWait() * 1000, s_logger); + Script command = new Script(createTemplateFromSnapshotXenScript, cmd.getWait() * 1000, s_logger); command.add("-p", snapshotPath); 
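+            // -p is the NFS-mounted snapshot directory; -s (below) is the snapshot file name (suffixed with .vhd when missing) and -n the name of the template file to create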
command.add("-s", snapshotName); command.add("-n", templateName); @@ -424,7 +450,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S // get snapshot file name String templateName = srcFile.getName(); - // add kvm file extension for copied template name + // add kvm file extension for copied template name String fileName = templateName + "." + srcFormat.getFileExtension(); String destFileFullPath = destFile.getAbsolutePath() + File.separator + fileName; s_logger.debug("copy snapshot " + srcFile.getAbsolutePath() + " to template " + destFileFullPath); @@ -442,7 +468,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S bufferWriter.write("\n"); bufferWriter.write("filename=" + fileName); bufferWriter.write("\n"); - long size = this._storage.getSize(destFileFullPath); + long size = _storage.getSize(destFileFullPath); bufferWriter.write("size=" + size); bufferWriter.close(); writer.close(); @@ -509,15 +535,16 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S DataTO destData = cmd.getDestTO(); DataStoreTO srcDataStore = srcData.getDataStore(); DataStoreTO destDataStore = destData.getDataStore(); - if (srcDataStore.getRole() == DataStoreRole.Image || srcDataStore.getRole() == DataStoreRole.ImageCache) { + if (srcDataStore.getRole() == DataStoreRole.Image || srcDataStore.getRole() == DataStoreRole.ImageCache || + srcDataStore.getRole() == DataStoreRole.Primary) { if (!(srcDataStore instanceof NfsTO)) { s_logger.debug("only support nfs storage as src, when create template from snapshot"); return Answer.createUnsupportedCommandAnswer(cmd); } if (destDataStore instanceof NfsTO) { - return copySnapshotToTemplateFromNfsToNfs(cmd, (SnapshotObjectTO) srcData, (NfsTO) srcDataStore, - (TemplateObjectTO) destData, (NfsTO) destDataStore); + return copySnapshotToTemplateFromNfsToNfs(cmd, (SnapshotObjectTO) srcData, (NfsTO)srcDataStore, + (TemplateObjectTO) destData, (NfsTO)destDataStore); } else if (destDataStore instanceof SwiftTO) { //create template on the same data store CopyCmdAnswer answer = (CopyCmdAnswer)copySnapshotToTemplateFromNfsToNfs(cmd, (SnapshotObjectTO) srcData, (NfsTO) srcDataStore, @@ -543,8 +570,8 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S execute(deleteCommand); } catch (Exception e) { s_logger.debug("Failed to clean up staging area:", e); - } - + } + TemplateObjectTO template = new TemplateObjectTO(); template.setPath(swiftPath); template.setSize(templateFile.length()); @@ -569,7 +596,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S execute(deleteCommand); } catch (Exception e) { s_logger.debug("Failed to clean up staging area:", e); - } + } return result; } } @@ -603,7 +630,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S NfsTO destImageStore = (NfsTO) destDataStore; if (srcDataStore instanceof S3TO) { S3TO s3 = (S3TO) srcDataStore; - return this.copyFromS3ToNfs(cmd, srcData, s3, destData, destImageStore); + return copyFromS3ToNfs(cmd, srcData, s3, destData, destImageStore); } else if (srcDataStore instanceof SwiftTO) { return copyFromSwiftToNfs(cmd, srcData, (SwiftTO)srcDataStore, destData, destImageStore); } @@ -792,7 +819,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S processor.configure("template processor", new HashMap()); return processor.getVirtualSize(file); } catch (Exception e) { - s_logger.debug("Failed to get virtual size:" 
,e); + s_logger.debug("Failed to get virtual size:" ,e); } return file.length(); } @@ -830,9 +857,13 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S } } } - ImageFormat format = this.getTemplateFormat(srcFile.getName()); + ImageFormat format = getTemplateFormat(srcFile.getName()); String key = destData.getPath() + S3Utils.SEPARATOR + srcFile.getName(); - putFile(s3, srcFile, bucket, key); + if (s3.isMultipartEnabled()){ + mputFile(s3, srcFile, bucket, key); + } else{ + putFile(s3, srcFile, bucket, key); + } DataTO retObj = null; if (destData.getObjectType() == DataObjectType.TEMPLATE) { @@ -1244,9 +1275,9 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S int index = name.lastIndexOf(File.separator); String snapshotPath = name.substring(0, index); if (deleteAllFlag) { - lPath = this.getRootDir(secondaryStorageUrl) + File.separator + snapshotPath + File.separator + "*"; + lPath = getRootDir(secondaryStorageUrl) + File.separator + snapshotPath + File.separator + "*"; } else { - lPath = this.getRootDir(secondaryStorageUrl) + File.separator + name + "*"; + lPath = getRootDir(secondaryStorageUrl) + File.separator + name + "*"; } final String result = deleteLocalFile(lPath); @@ -1434,7 +1465,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S Map s3ListTemplate(S3TO s3) { String bucket = s3.getBucketName(); // List the objects in the source directory on S3 - final List objectSummaries = S3Utils.getDirectory(s3, bucket, this.TEMPLATE_ROOT_DIR); + final List objectSummaries = S3Utils.getDirectory(s3, bucket, TEMPLATE_ROOT_DIR); if (objectSummaries == null) { return null; } @@ -1443,7 +1474,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S String key = objectSummary.getKey(); // String installPath = StringUtils.substringBeforeLast(key, // S3Utils.SEPARATOR); - String uniqueName = this.determineS3TemplateNameFromKey(key); + String uniqueName = determineS3TemplateNameFromKey(key); // TODO: isPublic value, where to get? TemplateProp tInfo = new TemplateProp(uniqueName, key, objectSummary.getSize(), objectSummary.getSize(), true, false); @@ -1456,7 +1487,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S Map s3ListVolume(S3TO s3) { String bucket = s3.getBucketName(); // List the objects in the source directory on S3 - final List objectSummaries = S3Utils.getDirectory(s3, bucket, this.VOLUME_ROOT_DIR); + final List objectSummaries = S3Utils.getDirectory(s3, bucket, VOLUME_ROOT_DIR); if (objectSummaries == null) { return null; } @@ -1465,7 +1496,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S String key = objectSummary.getKey(); // String installPath = StringUtils.substringBeforeLast(key, // S3Utils.SEPARATOR); - Long id = this.determineS3VolumeIdFromKey(key); + Long id = determineS3VolumeIdFromKey(key); // TODO: how to get volume template name TemplateProp tInfo = new TemplateProp(id.toString(), key, objectSummary.getSize(), objectSummary.getSize(), true, false); @@ -2226,8 +2257,8 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S * * CIFS parameters are documented with mount.cifs at * http://linux.die.net/man/8/mount.cifs - * For simplicity, when a URI is used to specify a CIFS share, - * options such as domain,user,password are passed as query parameters. 
+ * For simplicity, when a URI is used to specify a CIFS share, + * options such as domain,user,password are passed as query parameters. * * @param uri * crresponding to the remote device. Will throw for unsupported @@ -2262,7 +2293,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S return dir; } - + protected void umount(String localRootPath, URI uri) { ensureLocalRootPathExists(localRootPath, uri); @@ -2286,7 +2317,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S } s_logger.debug("Successfully umounted " + localRootPath); } - + protected void mount(String localRootPath, String remoteDevice, URI uri) { s_logger.debug("mount " + uri.toString() + " on " + localRootPath); ensureLocalRootPathExists(localRootPath, uri); diff --git a/setup/db/db/schema-420to430.sql b/setup/db/db/schema-420to430.sql index 44a884d99ef..653ff77c090 100644 --- a/setup/db/db/schema-420to430.sql +++ b/setup/db/db/schema-420to430.sql @@ -280,4 +280,115 @@ CREATE VIEW `cloud`.`template_view` AS `cloud`.`resource_tags` ON resource_tags.resource_id = vm_template.id and (resource_tags.resource_type = 'Template' or resource_tags.resource_type='ISO') where - vm_template.state='Active'; + vm_template.state='Active'; +DROP VIEW IF EXISTS `cloud`.`volume_view`; +CREATE VIEW `cloud`.`volume_view` AS + select + volumes.id, + volumes.uuid, + volumes.name, + volumes.device_id, + volumes.volume_type, + volumes.size, + volumes.min_iops, + volumes.max_iops, + volumes.created, + volumes.state, + volumes.attached, + volumes.removed, + volumes.pod_id, + volumes.display_volume, + volumes.format, + volumes.path, + account.id account_id, + account.uuid account_uuid, + account.account_name account_name, + account.type account_type, + domain.id domain_id, + domain.uuid domain_uuid, + domain.name domain_name, + domain.path domain_path, + projects.id project_id, + projects.uuid project_uuid, + projects.name project_name, + data_center.id data_center_id, + data_center.uuid data_center_uuid, + data_center.name data_center_name, + data_center.networktype data_center_type, + vm_instance.id vm_id, + vm_instance.uuid vm_uuid, + vm_instance.name vm_name, + vm_instance.state vm_state, + vm_instance.vm_type, + user_vm.display_name vm_display_name, + volume_store_ref.size volume_store_size, + volume_store_ref.download_pct, + volume_store_ref.download_state, + volume_store_ref.error_str, + volume_store_ref.created created_on_store, + disk_offering.id disk_offering_id, + disk_offering.uuid disk_offering_uuid, + disk_offering.name disk_offering_name, + disk_offering.display_text disk_offering_display_text, + disk_offering.use_local_storage, + disk_offering.system_use, + disk_offering.bytes_read_rate, + disk_offering.bytes_write_rate, + disk_offering.iops_read_rate, + disk_offering.iops_write_rate, + storage_pool.id pool_id, + storage_pool.uuid pool_uuid, + storage_pool.name pool_name, + cluster.hypervisor_type, + vm_template.id template_id, + vm_template.uuid template_uuid, + vm_template.extractable, + vm_template.type template_type, + resource_tags.id tag_id, + resource_tags.uuid tag_uuid, + resource_tags.key tag_key, + resource_tags.value tag_value, + resource_tags.domain_id tag_domain_id, + resource_tags.account_id tag_account_id, + resource_tags.resource_id tag_resource_id, + resource_tags.resource_uuid tag_resource_uuid, + resource_tags.resource_type tag_resource_type, + resource_tags.customer tag_customer, + async_job.id job_id, + async_job.uuid job_uuid, + async_job.job_status 
job_status, + async_job.account_id job_account_id + from + `cloud`.`volumes` + inner join + `cloud`.`account` ON volumes.account_id = account.id + inner join + `cloud`.`domain` ON volumes.domain_id = domain.id + left join + `cloud`.`projects` ON projects.project_account_id = account.id + left join + `cloud`.`data_center` ON volumes.data_center_id = data_center.id + left join + `cloud`.`vm_instance` ON volumes.instance_id = vm_instance.id + left join + `cloud`.`user_vm` ON user_vm.id = vm_instance.id + left join + `cloud`.`volume_store_ref` ON volumes.id = volume_store_ref.volume_id + left join + `cloud`.`disk_offering` ON volumes.disk_offering_id = disk_offering.id + left join + `cloud`.`storage_pool` ON volumes.pool_id = storage_pool.id + left join + `cloud`.`cluster` ON storage_pool.cluster_id = cluster.id + left join + `cloud`.`vm_template` ON volumes.template_id = vm_template.id OR volumes.iso_id = vm_template.id + left join + `cloud`.`resource_tags` ON resource_tags.resource_id = volumes.id + and resource_tags.resource_type = 'Volume' + left join + `cloud`.`async_job` ON async_job.instance_id = volumes.id + and async_job.instance_type = 'Volume' + and async_job.job_status = 0; + +INSERT IGNORE INTO `cloud`.`configuration`(category, instance, component, name, value, description, default_value) VALUES ('Advanced', 'DEFAULT', 'management-server', 's3.multipart.enabled', 'true', 'enable s3 multipart upload', 'true'); + diff --git a/systemvm/pom.xml b/systemvm/pom.xml index aa399402c14..9fe2688705e 100644 --- a/systemvm/pom.xml +++ b/systemvm/pom.xml @@ -204,7 +204,7 @@ vmware - nonoss + noredist diff --git a/test/integration/component/test_vpc_network.py b/test/integration/component/test_vpc_network.py index 970a6254c85..ffd41a28d5d 100644 --- a/test/integration/component/test_vpc_network.py +++ b/test/integration/component/test_vpc_network.py @@ -27,7 +27,7 @@ from marvin.integration.lib.base import * from marvin.integration.lib.common import * from marvin.remoteSSHClient import remoteSSHClient import datetime - +from ddt import ddt, data class Services: """Test VPC network services @@ -96,6 +96,35 @@ class Services: "SourceNat": {"SupportedSourceNatTypes": "peraccount"}, }, }, + "network_offering_vpcNS": { + "name": 'VPC Network offering', + "displaytext": 'VPC Network off', + "guestiptype": 'Isolated', + "supportedservices": 'Vpn,Dhcp,Dns,SourceNat,PortForwarding,Lb,UserData,StaticNat,NetworkACL', + "traffictype": 'GUEST', + "availability": 'Optional', + "useVpc": 'on', + "serviceProviderList": { + "Vpn": 'VpcVirtualRouter', + "Dhcp": 'VpcVirtualRouter', + "Dns": 'VpcVirtualRouter', + "SourceNat": 'VpcVirtualRouter', + "PortForwarding": 'VpcVirtualRouter', + "Lb": 'Netscaler', + "UserData": 'VpcVirtualRouter', + "StaticNat": 'VpcVirtualRouter', + "NetworkACL": 'VpcVirtualRouter' + }, + "serviceCapabilityList": { + "SourceNat": { + "SupportedSourceNatTypes": "peraccount" + }, + "lb": { + "SupportedLbIsolation": "dedicated" + }, + }, + }, + "network_off_shared": { "name": 'Shared Network offering', "displaytext": 'Shared Network offering', @@ -116,6 +145,18 @@ class Services: "displaytext": "TestVPC", "cidr": '10.0.0.1/24' }, + "netscaler": { + "ipaddress": '10.102.192.50', + "username": 'nsroot', + "password": 'nsroot', + "networkdevicetype": 'NetscalerVPXLoadBalancer', + "publicinterface": '1/3', + "privateinterface": '1/4', + "numretries": 2, + "lbdevicededicated": True, + "lbdevicecapacity": 50, + "port": 22, + }, "network": { "name": "Test Network", "displaytext": "Test Network", @@ 
-172,7 +213,7 @@ class Services: "timeout": 10, } - +@ddt class TestVPCNetwork(cloudstackTestCase): @classmethod @@ -199,12 +240,17 @@ class TestVPCNetwork(cloudstackTestCase): cls.services["service_offering"] ) cls._cleanup.append(cls.service_offering) - cls.vpc_off = VpcOffering.create( - cls.api_client, - cls.services["vpc_offering"] - ) - cls._cleanup.append(cls.vpc_off) - cls.vpc_off.update(cls.api_client, state='Enabled') + # Configure Netscaler device + global NSconfigured + + try: + cls.netscaler = add_netscaler(cls.api_client, cls.zone.id, cls.services["netscaler"]) + cls._cleanup.append(cls.netscaler) + NSconfigured = True + except Exception as e: + NSconfigured = False + raise Exception ("Warning: Exception in setUpClass: %s" % e) + return @classmethod @@ -287,9 +333,10 @@ class TestVPCNetwork(cloudstackTestCase): ) self.debug("VPC network validated - %s" % network.name) return - + + @data("network_offering", "network_offering_vpcNS") @attr(tags=["advanced", "intervlan"]) - def test_01_create_network(self): + def test_01_create_network(self, value): """ Test create network in VPC """ @@ -305,20 +352,26 @@ class TestVPCNetwork(cloudstackTestCase): # 5. Create a network using the network offering created in step2 as # part of this VPC. - self.debug("Creating a VPC offering..") - vpc_off = VpcOffering.create( - self.apiclient, - self.services["vpc_offering"] - ) - self.cleanup.append(vpc_off) - self.validate_vpc_offering(vpc_off) + if (value == "network_offering_vpcNS" and NSconfigured == False): + self.skipTest('Netscaler not configured: skipping test') - self.debug("Enabling the VPC offering created") - vpc_off.update(self.apiclient, state='Enabled') + if (value == "network_offering"): + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering', + listall=True + ) + else: + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering with Netscaler', + listall=True + ) + if isinstance(vpc_off_list, list): + vpc_off=vpc_off_list[0] + self.debug("Creating a VPC with offering: %s" % vpc_off.id) - self.debug("creating a VPC network in the account: %s" % - self.account.name) self.services["vpc"]["cidr"] = '10.1.1.1/16' vpc = VPC.create( self.apiclient, @@ -332,7 +385,7 @@ class TestVPCNetwork(cloudstackTestCase): self.network_offering = NetworkOffering.create( self.apiclient, - self.services["network_offering"], + self.services[value], conservemode=False ) # Enable Network offering @@ -380,11 +433,12 @@ class TestVPCNetwork(cloudstackTestCase): ) return + @data("network_offering", "network_offering_vpcNS") @attr(tags=["advanced", "intervlan"]) - def test_02_create_network_fail(self): + def test_02_create_network_fail(self, value): """ Test create network in VPC mismatched services (Should fail) """ - + # Validate the following # 1. Create VPC Offering by specifying all supported Services # (Vpn,dhcpdns,UserData, SourceNat,Static NAT and PF,LB,NetworkAcl) @@ -398,20 +452,26 @@ class TestVPCNetwork(cloudstackTestCase): # part of this VPC. # 6. 
Network creation should fail - self.debug("Creating a VPC offering..") - vpc_off = VpcOffering.create( - self.apiclient, - self.services["vpc_offering"] - ) + if (value == "network_offering_vpcNS" and NSconfigured == False): + self.skipTest('Netscaler not configured: skipping test') - self.cleanup.append(vpc_off) - self.validate_vpc_offering(vpc_off) - self.debug("Enabling the VPC offering created") - vpc_off.update(self.apiclient, state='Enabled') - - self.debug("creating a VPC network in the account: %s" % - self.account.name) + if (value == "network_offering"): + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering', + listall=True + ) + else: + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering with Netscaler', + listall=True + ) + if isinstance(vpc_off_list, list): + vpc_off=vpc_off_list[0] + self.debug("Creating a VPC with offering: %s" % vpc_off.id) + self.services["vpc"]["cidr"] = '10.1.1.1/16' vpc = VPC.create( self.apiclient, @@ -423,13 +483,13 @@ class TestVPCNetwork(cloudstackTestCase): ) self.validate_vpc_network(vpc) - self.services["network_offering"]["supportedservices"] = 'SourceNat' - self.services["network_offering"]["serviceProviderList"] = { + #self.services[value]["supportedservices"] = 'SourceNat' + self.services[value]["serviceProviderList"] = { "SourceNat": 'VirtualRouter', } self.network_offering = NetworkOffering.create( self.apiclient, - self.services["network_offering"], + self.services[value], conservemode=False ) # Enable Network offering @@ -452,99 +512,9 @@ class TestVPCNetwork(cloudstackTestCase): ) return - @attr(tags=["netscaler", "intervlan"]) - def test_03_create_network_netscaler(self): - """ Test create network using netscaler for LB - """ - - # Validate the following - # 1. Create VPC Offering by specifying all supported Services - # (Vpn,dhcpdns,UserData, SourceNat,Static NAT and PF,LB,NetworkAcl) - # 2. Create a VPC using the above VPC offering - # 3. Create a network offering with guest type="Isolated that has - # LB services provided by Netscaler and all other services - # provided by VPCVR and conserver mode is "ON" - # 4. Create a VPC using the above VPC offering. - # 5. 
Create a network using the network offering created in step2 as - # part of this VPC - - self.debug("Creating a VPC offering..") - vpc_off = VpcOffering.create( - self.apiclient, - self.services["vpc_offering"] - ) - - self.cleanup.append(vpc_off) - self.validate_vpc_offering(vpc_off) - - self.debug("Enabling the VPC offering created") - vpc_off.update(self.apiclient, state='Enabled') - - self.debug("creating a VPC network in the account: %s" % - self.account.name) - self.services["vpc"]["cidr"] = '10.1.1.1/16' - vpc = VPC.create( - self.apiclient, - self.services["vpc"], - vpcofferingid=vpc_off.id, - zoneid=self.zone.id, - account=self.account.name, - domainid=self.account.domainid - ) - self.validate_vpc_network(vpc) - - self.network_offering = NetworkOffering.create( - self.apiclient, - self.services["network_off_netscaler"], - conservemode=False - ) - # Enable Network offering - self.network_offering.update(self.apiclient, state='Enabled') - self.cleanup.append(self.network_offering) - - # Creating network using the network offering created - self.debug("Creating network with network offering: %s" % - self.network_offering.id) - network = Network.create( - self.apiclient, - self.services["network"], - accountid=self.account.name, - domainid=self.account.domainid, - networkofferingid=self.network_offering.id, - zoneid=self.zone.id, - gateway='10.1.1.1', - vpcid=vpc.id - ) - self.debug("Created network with ID: %s" % network.id) - self.debug( - "Verifying list network response to check if network created?") - networks = Network.list( - self.apiclient, - id=network.id, - listall=True - ) - self.assertEqual( - isinstance(networks, list), - True, - "List networks should return a valid response" - ) - nw = networks[0] - - self.assertEqual( - nw.networkofferingid, - self.network_offering.id, - "Network should be created from network offering - %s" % - self.network_offering.id - ) - self.assertEqual( - nw.vpcid, - vpc.id, - "Network should be created in VPC: %s" % vpc.name - ) - return - + @data("network_offering", "network_offering_vpcNS") @attr(tags=["advanced", "intervlan"]) - def test_04_create_multiple_networks_with_lb(self): + def test_04_create_multiple_networks_with_lb(self, value): """ Test create multiple networks with LB service (Should fail) """ @@ -559,20 +529,26 @@ class TestVPCNetwork(cloudstackTestCase): # 5. 
Create another network using the network offering created in # step3 as part of this VPC - self.debug("Creating a VPC offering..") - vpc_off = VpcOffering.create( - self.apiclient, - self.services["vpc_offering"] - ) + if (value == "network_offering_vpcNS" and NSconfigured == False): + self.skipTest('Netscaler not configured: skipping test') - self.cleanup.append(vpc_off) - self.validate_vpc_offering(vpc_off) - self.debug("Enabling the VPC offering created") - vpc_off.update(self.apiclient, state='Enabled') + if (value == "network_offering"): + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering', + listall=True + ) + else: + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering with Netscaler', + listall=True + ) + if isinstance(vpc_off_list, list): + vpc_off=vpc_off_list[0] + self.debug("Creating a VPC with offering: %s" % vpc_off.id) - self.debug("creating a VPC network in the account: %s" % - self.account.name) self.services["vpc"]["cidr"] = '10.1.1.1/16' vpc = VPC.create( self.apiclient, @@ -586,7 +562,7 @@ class TestVPCNetwork(cloudstackTestCase): self.network_offering = NetworkOffering.create( self.apiclient, - self.services["network_offering"], + self.services[value], conservemode=False ) # Enable Network offering @@ -664,20 +640,15 @@ class TestVPCNetwork(cloudstackTestCase): # 5. Create another network using the network offering created in # step3 as part of this VPC - self.debug("Creating a VPC offering..") - vpc_off = VpcOffering.create( - self.apiclient, - self.services["vpc_offering"] - ) + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering', + listall=True + ) + if isinstance(vpc_off_list, list): + vpc_off=vpc_off_list[0] + self.debug("Creating a VPC with offering: %s" % vpc_off.id) - self.cleanup.append(vpc_off) - self.validate_vpc_offering(vpc_off) - - self.debug("Enabling the VPC offering created") - vpc_off.update(self.apiclient, state='Enabled') - - self.debug("creating a VPC network in the account: %s" % - self.account.name) self.services["vpc"]["cidr"] = '10.1.1.1/16' vpc = VPC.create( self.apiclient, @@ -689,12 +660,30 @@ class TestVPCNetwork(cloudstackTestCase): ) self.validate_vpc_network(vpc) + #with self.assertRaises(Exception): + self.network_offering = NetworkOffering.create( + self.apiclient, + self.services["network_offering_vpcNS"], + conservemode=False + ) + # Enable Network offering + self.network_offering.update(self.apiclient, state='Enabled') + self.cleanup.append(self.network_offering) + + # Creating network using the network offering created + self.debug("Creating network with network offering: %s" % + self.network_offering.id) with self.assertRaises(Exception): - NetworkOffering.create( - self.apiclient, - self.services["network_off_netscaler"], - conservemode=False - ) + Network.create( + self.apiclient, + self.services["network"], + accountid=self.account.name, + domainid=self.account.domainid, + networkofferingid=self.network_offering.id, + zoneid=self.zone.id, + gateway='10.1.1.1', + vpcid=vpc.id + ) self.debug("Network creation failed") return @@ -907,9 +896,10 @@ class TestVPCNetwork(cloudstackTestCase): ) self.debug("Network creation failed as VPC doesn't have LB service") return - + + @data("network_off_shared", "network_offering_vpcNS") @attr(tags=["advanced", "intervlan"]) - def test_09_create_network_shared_nwoff(self): + def test_09_create_network_shared_nwoff(self, value): """ Test create network with shared network offering """ @@ -923,20 +913,25 @@ class 
TestVPCNetwork(cloudstackTestCase): # 5. Create a network using the network offering created in step2 # as part of this VPC - self.debug("Creating a VPC offering") - vpc_off = VpcOffering.create( - self.apiclient, - self.services["vpc_offering"] - ) + if (value == "network_offering_vpcNS" and NSconfigured == False): + self.skipTest('Netscaler not configured: skipping test') - self.cleanup.append(vpc_off) - self.validate_vpc_offering(vpc_off) + if (value == "network_off_shared"): + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering', + listall=True + ) + else: + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering with Netscaler', + listall=True + ) + if isinstance(vpc_off_list, list): + vpc_off=vpc_off_list[0] + self.debug("Creating a VPC with offering: %s" % vpc_off.id) - self.debug("Enabling the VPC offering created") - vpc_off.update(self.apiclient, state='Enabled') - - self.debug("creating a VPC network in the account: %s" % - self.account.name) self.services["vpc"]["cidr"] = '10.1.1.1/16' vpc = VPC.create( self.apiclient, @@ -977,8 +972,9 @@ class TestVPCNetwork(cloudstackTestCase): self.debug("Network creation failed") return + @data("network_offering", "network_offering_vpcNS") @attr(tags=["advanced", "intervlan"]) - def test_10_create_network_with_conserve_mode(self): + def test_10_create_network_with_conserve_mode(self, value): """ Test create network with conserve mode ON """ @@ -993,20 +989,25 @@ class TestVPCNetwork(cloudstackTestCase): # 5. Create a network using the network offering created in step2 as # part of this VPC - self.debug("Creating a VPC offering") - vpc_off = VpcOffering.create( - self.apiclient, - self.services["vpc_offering"] - ) + if (value == "network_offering_vpcNS" and NSconfigured == False): + self.skipTest('Netscaler not configured: skipping test') - self.cleanup.append(vpc_off) - self.validate_vpc_offering(vpc_off) + if (value == "network_offering"): + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering', + listall=True + ) + else: + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering with Netscaler', + listall=True + ) + if isinstance(vpc_off_list, list): + vpc_off=vpc_off_list[0] + self.debug("Creating a VPC with offering: %s" % vpc_off.id) - self.debug("Enabling the VPC offering created") - vpc_off.update(self.apiclient, state='Enabled') - - self.debug("creating a VPC network in the account: %s" % - self.account.name) self.services["vpc"]["cidr"] = '10.1.1.1/16' vpc = VPC.create( self.apiclient, @@ -1023,13 +1024,14 @@ class TestVPCNetwork(cloudstackTestCase): with self.assertRaises(Exception): NetworkOffering.create( self.apiclient, - self.services["network_offering"], + self.services[value], conservemode=True ) self.debug( "Network creation failed as VPC support nw with conserve mode OFF") return +@ddt class TestVPCNetworkRanges(cloudstackTestCase): @classmethod @@ -1056,12 +1058,19 @@ class TestVPCNetworkRanges(cloudstackTestCase): cls.services["service_offering"] ) cls._cleanup.append(cls.service_offering) - cls.vpc_off = VpcOffering.create( - cls.api_client, - cls.services["vpc_offering"] - ) - cls.vpc_off.update(cls.api_client, state='Enabled') - cls._cleanup.append(cls.vpc_off) + # Configure Netscaler device + global NSconfigured + + try: + cls.netscaler = add_netscaler(cls.api_client, cls.zone.id, cls.services["netscaler"]) + cls._cleanup = [ + cls.netscaler + ] + NSconfigured = True + except Exception as e: + NSconfigured = False + raise 
Exception ("Warning: Exception in setUpClass: %s" % e) + return @classmethod @@ -1144,8 +1153,9 @@ class TestVPCNetworkRanges(cloudstackTestCase): self.debug("VPC network validated - %s" % network.name) return + @data("network_offering", "network_offering_vpcNS") @attr(tags=["advanced", "intervlan"]) - def test_01_create_network_outside_range(self): + def test_01_create_network_outside_range(self, value): """ Test create network outside cidr range of VPC """ @@ -1154,19 +1164,25 @@ class TestVPCNetworkRanges(cloudstackTestCase): # 2. Add network1 with cidr - 10.2.1.1/24 to this VPC # 3. Network creation should fail. - self.debug("Creating a VPC offering") - vpc_off = VpcOffering.create( - self.apiclient, - self.services["vpc_offering"] - ) + if (value == "network_offering_vpcNS" and NSconfigured == False): + self.skipTest('Netscaler not configured: skipping test') - self.cleanup.append(vpc_off) - self.validate_vpc_offering(vpc_off) + if (value == "network_offering"): + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering', + listall=True + ) + else: + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering with Netscaler', + listall=True + ) + if isinstance(vpc_off_list, list): + vpc_off=vpc_off_list[0] + self.debug("Creating a VPC with offering: %s" % vpc_off.id) - self.debug("Enabling the VPC offering created") - vpc_off.update(self.apiclient, state='Enabled') - - self.debug("creating a VPC network with cidr: 10.1.1.1/16") self.services["vpc"]["cidr"] = '10.1.1.1/16' vpc = VPC.create( self.apiclient, @@ -1182,7 +1198,7 @@ class TestVPCNetworkRanges(cloudstackTestCase): self.network_offering = NetworkOffering.create( self.apiclient, - self.services["network_offering"], + self.services[value], conservemode=False ) # Enable Network offering @@ -1268,8 +1284,9 @@ class TestVPCNetworkRanges(cloudstackTestCase): "Network creation failed as network cidr range is outside of vpc") return + @data("network_offering", "network_offering_vpcNS") @attr(tags=["advanced", "intervlan"]) - def test_03_create_network_inside_range(self): + def test_03_create_network_inside_range(self, value): """ Test create network inside cidr range of VPC """ @@ -1278,17 +1295,24 @@ class TestVPCNetworkRanges(cloudstackTestCase): # 2. Add network1 with cidr - 10.1.1.1/8 to this VPC # 3. Network creation should fail. 
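+        # note: 10.1.1.1/8 denotes 10.0.0.0/8, which is broader than the VPC's 10.1.1.1/16 super CIDR rather than contained in it, so the tier create call is expected to be rejected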
- self.debug("Creating a VPC offering") - vpc_off = VpcOffering.create( - self.apiclient, - self.services["vpc_offering"] - ) + if (value == "network_offering_vpcNS" and NSconfigured == False): + self.skipTest('Netscaler not configured: skipping test') - self.cleanup.append(vpc_off) - self.validate_vpc_offering(vpc_off) - - self.debug("Enabling the VPC offering created") - vpc_off.update(self.apiclient, state='Enabled') + if (value == "network_offering"): + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering', + listall=True + ) + else: + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering with Netscaler', + listall=True + ) + if isinstance(vpc_off_list, list): + vpc_off=vpc_off_list[0] + self.debug("Creating a VPC with offering: %s" % vpc_off.id) self.debug("creating a VPC network with cidr: 10.1.1.1/16") self.services["vpc"]["cidr"] = '10.1.1.1/16' @@ -1306,7 +1330,7 @@ class TestVPCNetworkRanges(cloudstackTestCase): self.network_offering = NetworkOffering.create( self.apiclient, - self.services["network_offering"], + self.services[value], conservemode=False ) # Enable Network offering @@ -1333,8 +1357,9 @@ class TestVPCNetworkRanges(cloudstackTestCase): "Network creation failed as network cidr range is inside of vpc") return + @data("network_offering", "network_offering_vpcNS") @attr(tags=["advanced", "intervlan"]) - def test_04_create_network_overlapping_range(self): + def test_04_create_network_overlapping_range(self, value): """ Test create network overlapping cidr range of VPC """ @@ -1345,19 +1370,24 @@ class TestVPCNetworkRanges(cloudstackTestCase): # 4. Add network3 with cidr - 10.1.1.1/26 to this VPC # 5. Network creation in step 3 & 4 should fail. self.services = Services().services + if (value == "network_offering_vpcNS" and NSconfigured == False): + self.skipTest('Netscaler not configured: skipping test') - self.debug("Creating a VPC offering") - vpc_off = VpcOffering.create( - self.apiclient, - self.services["vpc_offering"] - ) - - self.cleanup.append(vpc_off) - self.validate_vpc_offering(vpc_off) - - self.debug("Enabling the VPC offering created") - vpc_off.update(self.apiclient, state='Enabled') - + if (value == "network_offering"): + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering', + listall=True + ) + else: + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering with Netscaler', + listall=True + ) + if isinstance(vpc_off_list, list): + vpc_off=vpc_off_list[0] + self.debug("Creating a VPC with offering: %s" % vpc_off.id) self.debug("creating a VPC network with cidr: 10.1.1.1/16") self.services["vpc"]["cidr"] = '10.1.1.1/16' vpc = VPC.create( @@ -1374,7 +1404,7 @@ class TestVPCNetworkRanges(cloudstackTestCase): self.network_offering = NetworkOffering.create( self.apiclient, - self.services["network_offering"], + self.services[value], conservemode=False ) # Enable Network offering @@ -1457,8 +1487,9 @@ class TestVPCNetworkRanges(cloudstackTestCase): "Network creation failed as network range overlaps each other") return + @data("network_offering", "network_offering_vpcNS") @attr(tags=["advanced", "intervlan"]) - def test_05_create_network_diff_account(self): + def test_05_create_network_diff_account(self, value): """ Test create network from different account in VPC """ @@ -1467,17 +1498,24 @@ class TestVPCNetworkRanges(cloudstackTestCase): # 2. Add network1 with cidr - 10.1.1.1/24 to this VPC # 3. Network creation should fail. 
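+        # note: this CIDR does fit inside the VPC; creation is expected to fail because the network is requested by an account that does not own the VPC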
- self.debug("Creating a VPC offering") - vpc_off = VpcOffering.create( - self.apiclient, - self.services["vpc_offering"] - ) + if (value == "network_offering_vpcNS" and NSconfigured == False): + self.skipTest('Netscaler not configured: skipping test') - self.cleanup.append(vpc_off) - self.validate_vpc_offering(vpc_off) - - self.debug("Enabling the VPC offering created") - vpc_off.update(self.apiclient, state='Enabled') + if (value == "network_offering"): + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering', + listall=True + ) + else: + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering with Netscaler', + listall=True + ) + if isinstance(vpc_off_list, list): + vpc_off=vpc_off_list[0] + self.debug("Creating a VPC with offering: %s" % vpc_off.id) self.debug("creating a VPC network with cidr: 10.1.1.1/16") self.services["vpc"]["cidr"] = '10.1.1.1/16' @@ -1495,7 +1533,7 @@ class TestVPCNetworkRanges(cloudstackTestCase): self.network_offering = NetworkOffering.create( self.apiclient, - self.services["network_offering"], + self.services[value], conservemode=False ) # Enable Network offering @@ -1532,7 +1570,6 @@ class TestVPCNetworkRanges(cloudstackTestCase): "Network creation failed as VPC belongs to different account") return - class TestVPCNetworkUpgrade(cloudstackTestCase): @classmethod @@ -1559,12 +1596,7 @@ class TestVPCNetworkUpgrade(cloudstackTestCase): cls.services["service_offering"] ) cls._cleanup.append(cls.service_offering) - cls.vpc_off = VpcOffering.create( - cls.api_client, - cls.services["vpc_offering"] - ) - cls.vpc_off.update(cls.api_client, state='Enabled') - cls._cleanup.append(cls.vpc_off) + return @classmethod @@ -1646,7 +1678,7 @@ class TestVPCNetworkUpgrade(cloudstackTestCase): ) self.debug("VPC network validated - %s" % network.name) return - + @attr(tags=["advanced", "intervlan"]) def test_01_network_services_upgrade(self): """ Test update Network that is part of a VPC to a network offering that has more services @@ -1666,19 +1698,16 @@ class TestVPCNetworkUpgrade(cloudstackTestCase): # 8. Update network1 to NO2. 
self.debug("Creating a VPC offering..") - vpc_off = VpcOffering.create( - self.apiclient, - self.services["vpc_offering"] - ) - self.cleanup.append(vpc_off) - self.validate_vpc_offering(vpc_off) + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering', + listall=True + ) + if isinstance(vpc_off_list, list): + vpc_off=vpc_off_list[0] + self.debug("Creating a VPC with offering: %s" % vpc_off.id) - self.debug("Enabling the VPC offering created") - vpc_off.update(self.apiclient, state='Enabled') - - self.debug("creating a VPC network in the account: %s" % - self.account.name) self.services["vpc"]["cidr"] = '10.1.1.1/16' vpc = VPC.create( self.apiclient, @@ -2112,7 +2141,6 @@ class TestVPCNetworkUpgrade(cloudstackTestCase): ) return - class TestVPCNetworkGc(cloudstackTestCase): @classmethod @@ -2354,26 +2382,16 @@ class TestVPCNetworkGc(cloudstackTestCase): self.debug("Waiting for network garbage collection thread to run") # Wait for the network garbage collection thread to run wait_for_cleanup(self.apiclient, - ["network.gc.interval", "network.gc.wait"]*2) - self.debug("Check if the VPC router is in stopped state?") - routers = Router.list( - self.apiclient, - account=self.account.name, - domainid=self.account.domainid, - listall=True - ) - self.assertEqual( - isinstance(routers, list), - True, - "List routers shall return a valid response" - ) - router = routers[0] - # TODO: Add some more assertions - self.assertEqual( - router.state, - "Stopped", - "Router state should be stopped after network gc" - ) + ["network.gc.interval", "network.gc.wait"]) + + #Bug???: Network Acls are not cleared + netacls = NetworkACL.list(self.apiclient, networkid=self.network_1.id) + self.debug("List of NetACLS %s" % netacls) + self.assertEqual(netacls, None, "Netacls were not cleared after network GC thread is run") + + lbrules = LoadBalancerRule.list(self.apiclient, networkid=self.network_1.id) + self.debug("List of LB Rules %s" % lbrules) + self.assertEqual(lbrules, None, "LBrules were not cleared after network GC thread is run") return @attr(tags=["advanced", "intervlan"]) diff --git a/test/integration/component/test_vpc_vm_life_cycle.py b/test/integration/component/test_vpc_vm_life_cycle.py index 9844c1f8922..425c2848f96 100644 --- a/test/integration/component/test_vpc_vm_life_cycle.py +++ b/test/integration/component/test_vpc_vm_life_cycle.py @@ -189,7 +189,6 @@ class Services: "mode": 'advanced' } - class TestVMLifeCycleVPC(cloudstackTestCase): @classmethod @@ -927,13 +926,6 @@ class TestVMLifeCycleSharedNwVPC(cloudstackTestCase): domainid=cls.domain.id ) - cls.vpc_off = VpcOffering.create( - cls.api_client, - cls.services["vpc_offering"] - ) - - cls.vpc_off.update(cls.api_client, state='Enabled') - cls.services["vpc"]["cidr"] = '10.1.1.1/16' cls.vpc = VPC.create( cls.api_client, @@ -984,6 +976,10 @@ class TestVMLifeCycleSharedNwVPC(cloudstackTestCase): cls.services["network"]["physicalnetworkid"] = physical_network.id cls.services["network"]["vlan"] = shared_vlan + # Start Ip and End Ip should be specified for shared network + cls.services["network"]["startip"] = '10.1.2.20' + cls.services["network"]["endip"] = '10.1.2.30' + # Creating network using the network offering created cls.network_2 = Network.create( cls.api_client, @@ -994,7 +990,7 @@ class TestVMLifeCycleSharedNwVPC(cloudstackTestCase): zoneid=cls.zone.id, gateway='10.1.2.1', ) - # Spawn an instance in that network + cls.vm_1 = VirtualMachine.create( cls.api_client, cls.services["virtual_machine"], @@ -1004,7 +1000,7 @@ 
class TestVMLifeCycleSharedNwVPC(cloudstackTestCase): networkids=[str(cls.network_1.id), str(cls.network_2.id)] ) - # Spawn an instance in that network + cls.vm_2 = VirtualMachine.create( cls.api_client, cls.services["virtual_machine"], @@ -1014,6 +1010,8 @@ class TestVMLifeCycleSharedNwVPC(cloudstackTestCase): networkids=[str(cls.network_1.id), str(cls.network_2.id)] ) + + cls.vm_3 = VirtualMachine.create( cls.api_client, cls.services["virtual_machine"], @@ -1023,6 +1021,7 @@ class TestVMLifeCycleSharedNwVPC(cloudstackTestCase): networkids=[str(cls.network_1.id), str(cls.network_2.id)] ) + cls.public_ip_1 = PublicIPAddress.create( cls.api_client, accountid=cls.account.name, @@ -1040,7 +1039,10 @@ class TestVMLifeCycleSharedNwVPC(cloudstackTestCase): vpcid=cls.vpc.id, domainid=cls.account.domainid ) - cls.lb_rule.assign(cls.api_client, [cls.vm_1, cls.vm_2, cls.vm_3]) + + # Only the vms in the same network can be added to load balancing rule + # hence we can't add vm_2 with vm_1 + cls.lb_rule.assign(cls.api_client, [cls.vm_1]) cls.public_ip_2 = PublicIPAddress.create( cls.api_client, @@ -1084,16 +1086,20 @@ class TestVMLifeCycleSharedNwVPC(cloudstackTestCase): ) cls._cleanup = [ cls.account, - cls.service_offering, + cls.network_2, cls.nw_off, cls.shared_nw_off, - cls.vpc_off + cls.vpc_off, + cls.service_offering, ] return @classmethod def tearDownClass(cls): try: + cls.vpc_off.update(cls.api_client, state='Disabled') + cls.shared_nw_off.update(cls.api_client, state='Disabled') + cls.nw_off.update(cls.api_client, state='Disabled') cleanup_resources(cls.api_client, cls._cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) @@ -1381,6 +1387,9 @@ class TestVMLifeCycleSharedNwVPC(cloudstackTestCase): except Exception as e: self.fail("Failed to destroy the virtual instances, %s" % e) + #Wait for expunge interval to cleanup VM + wait_for_cleanup(self.apiclient, ["expunge.delay", "expunge.interval"]) + self.debug("Check if the instance is in stopped state?") vms = VirtualMachine.list( self.apiclient, @@ -1388,15 +1397,9 @@ class TestVMLifeCycleSharedNwVPC(cloudstackTestCase): listall=True ) self.assertEqual( - isinstance(vms, list), - True, - "List virtual machines should return a valid list" - ) - vm = vms[0] - self.assertEqual( - vm.state, - "Expunging", - "Virtual machine should be in expunging state" + vms, + None, + "List virtual machines should not return anything" ) self.debug("Validating if network rules are coonfigured properly?") @@ -1652,7 +1655,7 @@ class TestVMLifeCycleSharedNwVPC(cloudstackTestCase): ["expunge.interval", "expunge.delay"] ) - # Check if the network rules still exists after Vm expunged + # Check if the network rules still exists after Vm expunged self.debug("Checking if NAT rules existed ") with self.assertRaises(Exception): nat_rules = NATRule.list( @@ -1668,7 +1671,6 @@ class TestVMLifeCycleSharedNwVPC(cloudstackTestCase): ) return - class TestVMLifeCycleBothIsolated(cloudstackTestCase): @classmethod @@ -2004,7 +2006,6 @@ class TestVMLifeCycleBothIsolated(cloudstackTestCase): ) return - class TestVMLifeCycleStoppedVPCVR(cloudstackTestCase): @classmethod @@ -2690,7 +2691,7 @@ class TestVMLifeCycleStoppedVPCVR(cloudstackTestCase): ["expunge.interval", "expunge.delay"] ) - # Check if the network rules still exists after Vm expunged + # Check if the network rules still exists after Vm expunged self.debug("Checking if NAT rules existed ") with self.assertRaises(Exception): nat_rules = NATRule.list( diff --git 
a/tools/build/build_asf.sh b/tools/build/build_asf.sh index c2a817a8ffd..6170cd50df4 100755 --- a/tools/build/build_asf.sh +++ b/tools/build/build_asf.sh @@ -90,7 +90,7 @@ export currentversion=`mvn org.apache.maven.plugins:maven-help-plugin:2.1.1:eval echo "found $currentversion" echo 'setting version numbers' -mvn versions:set -DnewVersion=$version -P vmware -P developer -P systemvm -P simulator -P baremetal -P ucs -Dnonoss +mvn versions:set -DnewVersion=$version -P vmware -P developer -P systemvm -P simulator -P baremetal -P ucs -Dnoredist mv deps/XenServerJava/pom.xml.versionsBackup deps/XenServerJava/pom.xml perl -pi -e "s/-SNAPSHOT//" deps/XenServerJava/pom.xml perl -pi -e "s/-SNAPSHOT//" tools/apidoc/pom.xml diff --git a/tools/build/setnextversion.sh b/tools/build/setnextversion.sh index 7da3765704a..a41676db6db 100755 --- a/tools/build/setnextversion.sh +++ b/tools/build/setnextversion.sh @@ -62,7 +62,7 @@ export currentversion=`mvn org.apache.maven.plugins:maven-help-plugin:2.1.1:eval echo "found $currentversion" echo 'setting version numbers' -mvn versions:set -DnewVersion=$version -P vmware -P developer -P systemvm -P simulator -P baremetal -P ucs -Dnonoss +mvn versions:set -DnewVersion=$version -P vmware -P developer -P systemvm -P simulator -P baremetal -P ucs -Dnoredist mv deps/XenServerJava/pom.xml.versionsBackup deps/XenServerJava/pom.xml perl -pi -e "s/$currentversion/$version/" deps/XenServerJava/pom.xml perl -pi -e "s/$currentversion/$version/" tools/apidoc/pom.xml diff --git a/tools/marvin/marvin/deployAndRun.py b/tools/marvin/marvin/deployAndRun.py index d162fb39685..8a758a10e36 100644 --- a/tools/marvin/marvin/deployAndRun.py +++ b/tools/marvin/marvin/deployAndRun.py @@ -73,20 +73,24 @@ if __name__ == "__main__": n = 0 while(n < iterates): engine = \ - TestCaseExecuteEngine.TestCaseExecuteEngine(deploy.testClient, - deploy.getCfg(), - testCaseLogFile, - testResultLogFile) + TestCaseExecuteEngine.TestCaseExecuteEngine( + deploy.testClient, + deploy.getCfg( + ), + testCaseLogFile, + testResultLogFile) engine.loadTestsFromFile(options.module) engine.run() n = n + 1 else: n = 0 - while(n 0) { + for (var i = 0; i < domains.length; i++) { + array1.push({ + id: domains[i].id, + description: domains[i].path + }); + } + } + args.response.success({ + data: array1 + }); + } + }); + } + }, + account: { + label: 'label.account', + validation: { + required: true + } + } + } + }, + action: function(args) { + $.ajax({ + url: createURL('assignVirtualMachine&virtualmachine'), + data: { + virtualmachineid: args.context.instances[0].id, + domainid: args.data.domainid, + account: args.data.account + }, + success: function(json) { + var item = json.virtualmachine.virtualmachine; + args.response.success({ + data: item + }); + } + }); + }, + messages: { + notification: function(args) { + return 'Assign Instance to Another Account'; + } + }, + notification: { + poll: function(args) { + args.complete(); + } + } + }, + viewConsole: { label: 'label.view.console', action: { @@ -1924,6 +1992,11 @@ if (jsonObj.hypervisor == "BareMetal") { allowedActions.push("createTemplate"); } + + if (isAdmin() || isDomainAdmin()) { + allowedActions.push("assignVmToAnotherAccount"); + } + } else if (jsonObj.state == 'Starting') { // allowedActions.push("stop"); } else if (jsonObj.state == 'Error') { diff --git a/utils/src/com/cloud/utils/S3Utils.java b/utils/src/com/cloud/utils/S3Utils.java index 5ee578304d5..ce4d4b723e5 100644 --- a/utils/src/com/cloud/utils/S3Utils.java +++ 
b/utils/src/com/cloud/utils/S3Utils.java @@ -48,6 +48,7 @@ import org.apache.commons.lang.ArrayUtils; import org.apache.log4j.Logger; import com.amazonaws.AmazonClientException; +import com.amazonaws.AmazonServiceException; import com.amazonaws.ClientConfiguration; import com.amazonaws.HttpMethod; import com.amazonaws.auth.AWSCredentials; @@ -61,6 +62,9 @@ import com.amazonaws.services.s3.model.ObjectMetadata; import com.amazonaws.services.s3.model.PutObjectRequest; import com.amazonaws.services.s3.model.S3Object; import com.amazonaws.services.s3.model.S3ObjectSummary; +import com.amazonaws.services.s3.transfer.TransferManager; +import com.amazonaws.services.s3.transfer.Upload; + import com.cloud.utils.exception.CloudRuntimeException; public final class S3Utils { @@ -171,6 +175,58 @@ public final class S3Utils { } + // multi-part upload file + public static void mputFile(final ClientOptions clientOptions, + final File sourceFile, final String bucketName, final String key) throws InterruptedException { + + assert clientOptions != null; + assert sourceFile != null; + assert !isBlank(bucketName); + assert !isBlank(key); + + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(format("Multipart sending file %1$s as S3 object %2$s in " + + "bucket %3$s", sourceFile.getName(), key, bucketName)); + } + TransferManager tm = new TransferManager(S3Utils.acquireClient(clientOptions)); + Upload upload = tm.upload(bucketName, key, sourceFile); + upload.waitForCompletion(); + } + + // multi-part upload object + public static void mputObject(final ClientOptions clientOptions, + final InputStream sourceStream, final String bucketName, final String key) throws InterruptedException { + + assert clientOptions != null; + assert sourceStream != null; + assert !isBlank(bucketName); + assert !isBlank(key); + + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(format("Multipart sending stream as S3 object %1$s in " + + "bucket %2$s", key, bucketName)); + } + TransferManager tm = new TransferManager(S3Utils.acquireClient(clientOptions)); + Upload upload = tm.upload(bucketName, key, sourceStream, null); + upload.waitForCompletion(); + } + + // multi-part upload object + public static void mputObject(final ClientOptions clientOptions, + final PutObjectRequest req) throws InterruptedException { + + assert clientOptions != null; + assert req != null; + + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Multipart sending object to S3 using PutObjectRequest"); + } + TransferManager tm = new TransferManager(S3Utils.acquireClient(clientOptions)); + Upload upload = tm.upload(req); + upload.waitForCompletion(); + + } + public static void setObjectAcl(final ClientOptions clientOptions, final String bucketName, final String key, final CannedAccessControlList acl) { diff --git a/utils/src/com/cloud/utils/backoff/impl/ConstantTimeBackoff.java b/utils/src/com/cloud/utils/backoff/impl/ConstantTimeBackoff.java index 976e3690082..4386d4d96f9 100755 --- a/utils/src/com/cloud/utils/backoff/impl/ConstantTimeBackoff.java +++ b/utils/src/com/cloud/utils/backoff/impl/ConstantTimeBackoff.java @@ -19,16 +19,19 @@ package com.cloud.utils.backoff.impl; import java.util.Collection; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; import javax.ejb.Local; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + import com.cloud.utils.NumbersUtil; import com.cloud.utils.backoff.BackoffAlgorithm; import com.cloud.utils.component.AdapterBase; /** - * Implementation 
diff --git a/utils/src/com/cloud/utils/backoff/impl/ConstantTimeBackoff.java b/utils/src/com/cloud/utils/backoff/impl/ConstantTimeBackoff.java index 976e3690082..4386d4d96f9 100755 --- a/utils/src/com/cloud/utils/backoff/impl/ConstantTimeBackoff.java +++ b/utils/src/com/cloud/utils/backoff/impl/ConstantTimeBackoff.java @@ -19,16 +19,19 @@ package com.cloud.utils.backoff.impl; import java.util.Collection; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; import javax.ejb.Local; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + import com.cloud.utils.NumbersUtil; import com.cloud.utils.backoff.BackoffAlgorithm; import com.cloud.utils.component.AdapterBase; /** - * Implementation of the Agent Manager. This class controls the connection + * A BackoffAlgorithm implementation that waits a fixed amount of time + * before the caller retries the operation. * * @config * {@table @@ -38,27 +41,29 @@ import com.cloud.utils.component.AdapterBase; **/ @Local(value={BackoffAlgorithm.class}) public class ConstantTimeBackoff extends AdapterBase implements BackoffAlgorithm, ConstantTimeBackoffMBean { - int _count = 0; long _time; - ConcurrentHashMap _asleep = new ConcurrentHashMap(); + private final ConcurrentHashMap _asleep = new ConcurrentHashMap(); + private final static Log LOG = LogFactory.getLog(ConstantTimeBackoff.class); @Override public void waitBeforeRetry() { - _count++; + Thread current = Thread.currentThread(); try { - Thread current = Thread.currentThread(); _asleep.put(current.getName(), current); Thread.sleep(_time); + } catch (InterruptedException e) { + // JMX or other threads may interrupt this thread; log it at info level + // without a stack trace, since this is not an error + LOG.info("Thread " + current.getName() + + " interrupted while waiting for retry"); + } finally { _asleep.remove(current.getName()); - } catch(InterruptedException e) { - } return; } @Override public void reset() { - _count = 0; } @Override @@ -71,7 +76,7 @@ public class ConstantTimeBackoff extends AdapterBase implements BackoffAlgorithm public Collection getWaiters() { return _asleep.keySet(); } - + @Override public boolean wakeup(String threadName) { Thread th = _asleep.get(threadName); @@ -83,17 +88,6 @@ public class ConstantTimeBackoff extends AdapterBase implements BackoffAlgorithm return false; } - @Override - public boolean start() { - _count = 0; - return true; - } - - @Override - public boolean stop() { - return true; - } - @Override public long getTimeToWait() { return _time; diff --git a/utils/src/com/cloud/utils/script/Script.java b/utils/src/com/cloud/utils/script/Script.java index 04ad7c46ba0..b84ff2fe658 100755 --- a/utils/src/com/cloud/utils/script/Script.java +++ b/utils/src/com/cloud/utils/script/Script.java @@ -201,8 +201,8 @@ public class Script implements Callable { if (interpreter != null) { return interpreter.drain() ? task.getResult() : interpreter.interpret(ir); } else { - // null return is ok apparently - return (_process.exitValue() == 0) ? "Ok" : "Failed, exit code " + _process.exitValue(); + // no interpreter, so just return the exit value as a string + return String.valueOf(_process.exitValue()); } } else { break; } } @@ -245,7 +245,7 @@ public class Script implements Callable { error = interpreter.processError(reader); } else { - error = "Non zero exit code : " + _process.exitValue(); + error = String.valueOf(_process.exitValue()); } if (_logger.isDebugEnabled()) { @@ -482,4 +482,26 @@ public class Script implements Callable { return result.trim(); } + public static int runSimpleBashScriptForExitValue(String command) { + return runSimpleBashScriptForExitValue(command, 0); + } + + public static int runSimpleBashScriptForExitValue(String command, int timeout) { + + Script s = new Script("/bin/bash", timeout); + s.add("-c"); + s.add(command); + + String result = s.execute(null); + if (result == null || result.trim().isEmpty()) + return -1; + else { + try { + return Integer.valueOf(result.trim()); + } catch (NumberFormatException e) { + return -1; + } + } + } + + } diff --git a/utils/test/com/cloud/utils/backoff/impl/ConstantTimeBackoffTest.java b/utils/test/com/cloud/utils/backoff/impl/ConstantTimeBackoffTest.java new file mode 100644 index 00000000000..4b2b64d2d47 --- /dev/null +++ b/utils/test/com/cloud/utils/backoff/impl/ConstantTimeBackoffTest.java @@ -0,0 +1,110 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.
+package com.cloud.utils.backoff.impl; + +import java.util.HashMap; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.junit.Assert; +import org.junit.Test; + +public class ConstantTimeBackoffTest { + final static private Log LOG = LogFactory + .getLog(ConstantTimeBackoffTest.class); + + @Test + public void waitBeforeRetryWithInterrupt() throws InterruptedException { + final ConstantTimeBackoff backoff = new ConstantTimeBackoff(); + backoff.setTimeToWait(10); + Assert.assertTrue(backoff.getWaiters().isEmpty()); + Thread waitThread = new Thread(new Runnable() { + @Override + public void run() { + backoff.waitBeforeRetry(); + } + }); + waitThread.start(); + Thread.sleep(100); + Assert.assertFalse(backoff.getWaiters().isEmpty()); + waitThread.interrupt(); + Thread.sleep(100); + Assert.assertTrue(backoff.getWaiters().isEmpty()); + } + + @Test + public void waitBeforeRetry() throws InterruptedException { + final ConstantTimeBackoff backoff = new ConstantTimeBackoff(); + // let's not wait too much in a test + backoff.setTimeToWait(0); + // check if the list of waiters is empty + Assert.assertTrue(backoff.getWaiters().isEmpty()); + // call the waitBeforeRetry which will wait 0 ms and return + backoff.waitBeforeRetry(); + // on normal exit the list of waiters should be cleared + Assert.assertTrue(backoff.getWaiters().isEmpty()); + } + + @Test + public void configureEmpty() { + // at this point this is the only way the configure method gets invoked, + // therefore we have to make sure it works correctly + final ConstantTimeBackoff backoff = new ConstantTimeBackoff(); + backoff.configure("foo", new HashMap()); + Assert.assertEquals(5000, backoff.getTimeToWait()); + } + + @Test + public void configureWithValue() { + final ConstantTimeBackoff backoff = new ConstantTimeBackoff(); + HashMap params = new HashMap(); + params.put("seconds", "100"); + backoff.configure("foo", params); + Assert.assertEquals(100000, backoff.getTimeToWait()); + } + + /** + * Test that wakeup returns false when trying to wake a non-existing thread. + */ + @Test + public void wakeupNotExisting() { + final ConstantTimeBackoff backoff = new ConstantTimeBackoff(); + Assert.assertFalse(backoff.wakeup("NOT EXISTING THREAD")); + } + + /** + * Test that wakeup will return true if the thread is waiting. + */ + @Test + public void wakeupExisting() throws InterruptedException { + final ConstantTimeBackoff backoff = new ConstantTimeBackoff(); + backoff.setTimeToWait(10); + Thread thread = new Thread(new Runnable() { + @Override + public void run() { + LOG.debug("before"); + backoff.waitBeforeRetry(); + LOG.debug("after"); + } + }); + thread.start(); + LOG.debug("thread started"); + Thread.sleep(100); + LOG.debug("testing wakeup"); + Assert.assertTrue(backoff.wakeup(thread.getName())); + } +}
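Taken together, the reworked ConstantTimeBackoff and the unit test above suggest usage along the lines of the following sketch. The retry loop and the tryConnect() stub are invented for illustration; only configure(), waitBeforeRetry() and the "seconds" parameter come from the class itself.

import java.util.HashMap;
import java.util.Map;

import com.cloud.utils.backoff.impl.ConstantTimeBackoff;

public class ConstantTimeBackoffSketch {

    public static void main(String[] args) {
        ConstantTimeBackoff backoff = new ConstantTimeBackoff();

        // "seconds" drives the constant delay; configure() falls back to 5 seconds
        // when the parameter is absent, as the configureEmpty() test shows.
        Map<String, Object> params = new HashMap<String, Object>();
        params.put("seconds", "2");
        backoff.configure("example", params);

        while (!tryConnect()) {
            // registers the current thread in the waiters map, sleeps for the
            // configured time and removes the thread again, even when the sleep
            // is cut short by an interrupt or a wakeup() call
            backoff.waitBeforeRetry();
        }
    }

    // stand-in for the real operation being retried
    private static boolean tryConnect() {
        return Math.random() < 0.3;
    }
}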
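Returning to the Script.java change further up, here is a hedged sketch of how the new exit-value helper might be called; the probed command is only an example, not something introduced by this patch.

import com.cloud.utils.script.Script;

public class ExitValueSketch {

    public static void main(String[] args) {
        // Runs the command via "/bin/bash -c" and hands back the numeric exit
        // status parsed from Script.execute(null); -1 signals that the status
        // could not be determined (empty or unparsable result).
        int rc = Script.runSimpleBashScriptForExitValue("test -c /dev/kvm");

        if (rc == 0) {
            System.out.println("/dev/kvm is present");
        } else {
            System.out.println("/dev/kvm missing or check failed, exit status " + rc);
        }
    }
}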
diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java index 56f2f389cbe..a9536cfb544 100644 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java @@ -577,6 +577,14 @@ public class VirtualMachineMO extends BaseMO { } return null; } + + public boolean hasSnapshot() throws Exception { + VirtualMachineSnapshotInfo info = getSnapshotInfo(); + if(info != null) { + return info.getCurrentSnapshot() != null; + } + return false; + } public boolean createFullClone(String cloneName, ManagedObjectReference morFolder, ManagedObjectReference morResourcePool, ManagedObjectReference morDs) throws Exception { @@ -1629,6 +1637,7 @@ public class VirtualMachineMO extends BaseMO { public void tearDownDevices(Class[] deviceClasses) throws Exception { VirtualDevice[] devices = getMatchedDevices(deviceClasses); + if(devices.length > 0) { VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec(); VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[devices.length]; @@ -1637,9 +1646,9 @@ public class VirtualMachineMO extends BaseMO { deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec(); deviceConfigSpecArray[i].setDevice(devices[i]); deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.REMOVE); + vmConfigSpec.getDeviceChange().add(deviceConfigSpecArray[i]); } - vmConfigSpec.getDeviceChange().addAll(Arrays.asList(deviceConfigSpecArray)); if(!configureVm(vmConfigSpec)) { throw new Exception("Failed to detach devices"); } @@ -2021,6 +2030,10 @@ public class VirtualMachineMO extends BaseMO { return detachedDiskFiles; } + public List getAllDeviceList() throws Exception { + return (List)_context.getVimClient().getDynamicProperty(_mor, "config.hardware.device"); + } + public VirtualDisk[] getAllDiskDevice() throws Exception { List deviceList = new ArrayList(); List devices = (List)_context.getVimClient().getDynamicProperty(_mor, "config.hardware.device"); @@ -2034,6 +2047,19 @@ public class VirtualMachineMO extends BaseMO { return deviceList.toArray(new VirtualDisk[0]); } + + public VirtualDisk getDiskDeviceByBusName(List allDevices, String busName) throws Exception { + for(VirtualDevice device : allDevices ) { + if(device instanceof VirtualDisk) { + VirtualDisk disk = (VirtualDisk)device; + String diskBusName = getDeviceBusName(allDevices, (VirtualDevice)disk); + if(busName.equalsIgnoreCase(diskBusName)) + return disk; + } + } + + return null; + } public VirtualDisk[] getAllIndependentDiskDevice() throws Exception { List independentDisks = new ArrayList();
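Finally, a rough sketch of how the new VirtualMachineMO helpers could be combined by a caller. The clone decision and the "scsi0:0" bus name are illustrative assumptions, not values taken from this change.

import java.util.List;

import com.cloud.hypervisor.vmware.mo.VirtualMachineMO;
import com.vmware.vim25.VirtualDisk;

public class VmwareDiskLookupSketch {

    // Checks whether the VM already carries a snapshot (a caller might branch
    // to a full clone in that case) and then looks a disk up by its bus name.
    public static VirtualDisk findDisk(VirtualMachineMO vmMo) throws Exception {
        if (vmMo.hasSnapshot()) {
            System.out.println("VM already has a current snapshot");
        }

        List allDevices = vmMo.getAllDeviceList();  // raw list, matching the new accessor
        return vmMo.getDiskDeviceByBusName(allDevices, "scsi0:0");  // placeholder bus name
    }
}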