mirror of https://github.com/apache/cloudstack.git
commit 0b8355920e

@@ -39,6 +39,7 @@
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
<version>${cs.commons-io.version}</version>
</dependency>
<dependency>
<groupId>commons-daemon</groupId>
@@ -45,9 +45,12 @@ public interface Volume extends ControlledEntity, Identity, InternalIdentity, Ba
Destroy("The volume is destroyed, and can't be recovered."),
Destroying("The volume is destroying, and can't be recovered."),
UploadOp("The volume upload operation is in progress or in short the volume is on secondary storage"),
Uploading("volume is uploading"),
Copying("volume is copying from image store to primary, in case it's an uploaded volume"),
Uploaded("volume is uploaded");
Copying("Volume is copying from image store to primary, in case it's an uploaded volume"),
Uploaded("Volume is uploaded"),
NotUploaded("The volume entry is just created in DB, not yet uploaded"),
UploadInProgress("Volume upload is in progress"),
UploadError("Volume upload encountered some error"),
UploadAbandoned("Volume upload is abandoned since the upload was never initiated within a specificed time");

String _description;
@@ -95,12 +98,22 @@ public interface Volume extends ControlledEntity, Identity, InternalIdentity, Ba
s_fsm.addTransition(new StateMachine2.Transition<State, Event>(Migrating, Event.OperationSucceeded, Ready, null));
s_fsm.addTransition(new StateMachine2.Transition<State, Event>(Migrating, Event.OperationFailed, Ready, null));
s_fsm.addTransition(new StateMachine2.Transition<State, Event>(Destroy, Event.OperationSucceeded, Destroy, Arrays.asList(new StateMachine2.Transition.Impact[]{StateMachine2.Transition.Impact.USAGE})));
s_fsm.addTransition(new StateMachine2.Transition<State, Event>(Destroy, Event.OperationFailed, Destroy, Arrays.asList(StateMachine2.Transition.Impact.USAGE)));
s_fsm.addTransition(new StateMachine2.Transition<State, Event>(UploadOp, Event.OperationSucceeded, Uploaded, null));
s_fsm.addTransition(new StateMachine2.Transition<State, Event>(UploadOp, Event.OperationFailed, Allocated, null));
s_fsm.addTransition(new StateMachine2.Transition<State, Event>(Uploaded, Event.DestroyRequested, Destroy, null));
s_fsm.addTransition(new StateMachine2.Transition<State, Event>(Expunged, Event.ExpungingRequested, Expunged, null));
s_fsm.addTransition(new StateMachine2.Transition<State, Event>(Expunged, Event.OperationSucceeded, Expunged, null));
s_fsm.addTransition(new StateMachine2.Transition<State, Event>(Expunged, Event.OperationFailed, Expunged,null));
s_fsm.addTransition(new StateMachine2.Transition<State, Event>(Expunged, Event.OperationFailed, Expunged, null));
s_fsm.addTransition(new StateMachine2.Transition<State, Event>(NotUploaded, Event.OperationTimeout, UploadAbandoned, null));
s_fsm.addTransition(new StateMachine2.Transition<State, Event>(NotUploaded, Event.UploadRequested, UploadInProgress, null));
s_fsm.addTransition(new StateMachine2.Transition<State, Event>(NotUploaded, Event.OperationSucceeded, Uploaded, null));
s_fsm.addTransition(new StateMachine2.Transition<State, Event>(NotUploaded, Event.OperationFailed, UploadError, null));
s_fsm.addTransition(new StateMachine2.Transition<State, Event>(UploadInProgress, Event.OperationSucceeded, Uploaded, null));
s_fsm.addTransition(new StateMachine2.Transition<State, Event>(UploadInProgress, Event.OperationFailed, UploadError, null));
s_fsm.addTransition(new StateMachine2.Transition<State, Event>(UploadInProgress, Event.OperationTimeout, UploadError, null));
s_fsm.addTransition(new StateMachine2.Transition<State, Event>(UploadError, Event.DestroyRequested, Destroy, null));
s_fsm.addTransition(new StateMachine2.Transition<State, Event>(UploadAbandoned, Event.DestroyRequested, Destroy, null));
}
}
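The volume FSM above gives browser-initiated uploads their own lifecycle: NotUploaded for a row that only exists in the database, UploadInProgress once the POST actually starts, then Uploaded, UploadError or UploadAbandoned. A minimal sketch of driving that lifecycle with StateMachine2 follows; it is not part of the commit. The helper class, the StateDao-backed VolumeDao, and the assumption that Volume.State exposes getStateMachine() the same way the new VirtualMachineTemplate.State does are all additions here; only the states, events and the transitTo(...) call pattern come from this diff.

import com.cloud.storage.Volume;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.utils.fsm.NoTransitionException;
import com.cloud.utils.fsm.StateMachine2;

// Hypothetical helper, not part of this commit: applies the transitTo(...) pattern that
// TemplateServiceImpl uses for templates to the new volume upload states.
class VolumeUploadStateDriver {
    private final StateMachine2<Volume.State, Volume.Event, Volume> fsm = Volume.State.getStateMachine();

    void onUploadStarted(VolumeVO volume, VolumeDao volumeDao) throws NoTransitionException {
        // NotUploaded -> UploadInProgress once the client actually begins the POST
        fsm.transitTo(volume, Volume.Event.UploadRequested, null, volumeDao);
    }

    void onUploadFinished(VolumeVO volume, VolumeDao volumeDao, boolean ok) throws NoTransitionException {
        // UploadInProgress -> Uploaded on success, UploadInProgress -> UploadError on failure
        fsm.transitTo(volume, ok ? Volume.Event.OperationSucceeded : Volume.Event.OperationFailed, null, volumeDao);
    }
}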
@@ -120,7 +133,8 @@ public interface Volume extends ControlledEntity, Identity, InternalIdentity, Ba
SnapshotRequested,
DestroyRequested,
ExpungingRequested,
ResizeRequested;
ResizeRequested,
OperationTimeout;
}

/**
@@ -22,6 +22,7 @@ import org.apache.cloudstack.api.command.user.volume.AttachVolumeCmd;
import org.apache.cloudstack.api.command.user.volume.CreateVolumeCmd;
import org.apache.cloudstack.api.command.user.volume.DetachVolumeCmd;
import org.apache.cloudstack.api.command.user.volume.ExtractVolumeCmd;
import org.apache.cloudstack.api.command.user.volume.GetUploadParamsForVolumeCmd;
import org.apache.cloudstack.api.command.user.volume.MigrateVolumeCmd;
import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd;
import org.apache.cloudstack.api.command.user.volume.UploadVolumeCmd;

@@ -29,6 +30,9 @@ import org.apache.cloudstack.api.command.user.volume.UploadVolumeCmd;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.user.Account;
import org.apache.cloudstack.api.response.GetUploadParamsResponse;

import java.net.MalformedURLException;

public interface VolumeApiService {
/**

@@ -72,6 +76,8 @@ public interface VolumeApiService {
*/
Volume uploadVolume(UploadVolumeCmd cmd) throws ResourceAllocationException;

GetUploadParamsResponse uploadVolume(GetUploadParamsForVolumeCmd cmd) throws ResourceAllocationException, MalformedURLException;

boolean deleteVolume(long volumeId, Account caller) throws ConcurrentOperationException;

Volume attachVolumeToVM(AttachVolumeCmd command);
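The new uploadVolume(GetUploadParamsForVolumeCmd) overload is the server half of a two-step flow: the caller first asks for upload parameters, then POSTs the file straight to secondary storage. A rough sketch of how an implementation might assemble the response is shown below; it is not the real VolumeApiServiceImpl, and everything other than the interface method, registerVolumeForPostUpload(...) (added to VolumeService later in this diff) and createGetUploadParamsResponse(...) (defined on the new AbstractGetUploadParamsCmd) is an assumption.

// Hypothetical fragment only: volInfo, store, ssvmPublicAddress, metadata, expires and signature
// are assumed inputs produced elsewhere in the management server.
GetUploadParamsResponse buildUploadParams(GetUploadParamsForVolumeCmd cmd, VolumeService volService,
        VolumeInfo volInfo, DataStore store, String ssvmPublicAddress,
        String metadata, String expires, String signature) throws MalformedURLException {
    // endpoint selection helper added to VolumeService in this diff
    Pair<EndPoint, DataObject> target = volService.registerVolumeForPostUpload(volInfo, store);
    DataObject volOnStore = target.second();
    // illustrative URL only; the real POST URL is produced by the SSVM-facing upload code
    URL postUrl = new URL("https://" + ssvmPublicAddress + "/upload/" + volOnStore.getUuid());
    // response helper added on AbstractGetUploadParamsCmd in this diff
    return cmd.createGetUploadParamsResponse(UUID.fromString(volOnStore.getUuid()), postUrl, metadata, expires, signature);
}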
@@ -16,6 +16,7 @@
// under the License.
package com.cloud.template;

import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.util.List;

@@ -29,6 +30,7 @@ import org.apache.cloudstack.api.command.user.template.CopyTemplateCmd;
import org.apache.cloudstack.api.command.user.template.CreateTemplateCmd;
import org.apache.cloudstack.api.command.user.template.DeleteTemplateCmd;
import org.apache.cloudstack.api.command.user.template.ExtractTemplateCmd;
import org.apache.cloudstack.api.command.user.template.GetUploadParamsForTemplateCmd;
import org.apache.cloudstack.api.command.user.template.RegisterTemplateCmd;
import org.apache.cloudstack.api.command.user.template.UpdateTemplateCmd;

@@ -37,11 +39,14 @@ import com.cloud.exception.ResourceAllocationException;
import com.cloud.exception.StorageUnavailableException;
import com.cloud.user.Account;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.api.response.GetUploadParamsResponse;

public interface TemplateApiService {

VirtualMachineTemplate registerTemplate(RegisterTemplateCmd cmd) throws URISyntaxException, ResourceAllocationException;

public GetUploadParamsResponse registerTemplateForPostUpload(GetUploadParamsForTemplateCmd cmd) throws ResourceAllocationException, MalformedURLException;

VirtualMachineTemplate registerIso(RegisterIsoCmd cmd) throws IllegalArgumentException, ResourceAllocationException;

VirtualMachineTemplate copyTemplate(CopyTemplateCmd cmd) throws StorageUnavailableException, ResourceAllocationException;
@@ -26,10 +26,41 @@ import org.apache.cloudstack.api.InternalIdentity;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.Storage.TemplateType;
import com.cloud.storage.Volume.Event;
import com.cloud.storage.Volume.State;
import com.cloud.utils.fsm.StateMachine2;
import com.cloud.utils.fsm.StateObject;

public interface VirtualMachineTemplate extends ControlledEntity, Identity, InternalIdentity {
public interface VirtualMachineTemplate extends ControlledEntity, Identity, InternalIdentity, StateObject<VirtualMachineTemplate.State> {
enum State {
Active, Inactive;
Active,
Inactive,
NotUploaded,
UploadInProgress,
UploadError,
UploadAbandoned;

public static StateMachine2<State, Event, VirtualMachineTemplate> getStateMachine() {
return s_fsm;
}

private final static StateMachine2<State, Event, VirtualMachineTemplate> s_fsm = new StateMachine2<State, Event, VirtualMachineTemplate>();
static {
s_fsm.addTransition(new StateMachine2.Transition<State, Event>(NotUploaded, Event.OperationTimeout, UploadAbandoned, null));
s_fsm.addTransition(new StateMachine2.Transition<State, Event>(NotUploaded, Event.UploadRequested, UploadInProgress, null));
s_fsm.addTransition(new StateMachine2.Transition<State, Event>(NotUploaded, Event.OperationSucceeded, Active, null));
s_fsm.addTransition(new StateMachine2.Transition<State, Event>(NotUploaded, Event.OperationFailed, UploadError, null));
s_fsm.addTransition(new StateMachine2.Transition<State, Event>(UploadInProgress, Event.OperationSucceeded, Active, null));
s_fsm.addTransition(new StateMachine2.Transition<State, Event>(UploadInProgress, Event.OperationFailed, UploadError, null));
s_fsm.addTransition(new StateMachine2.Transition<State, Event>(UploadInProgress, Event.OperationTimeout, UploadError, null));
}
}

enum Event {
OperationFailed,
OperationSucceeded,
UploadRequested,
OperationTimeout;
}

public static enum BootloaderType {

@@ -47,6 +78,7 @@ public interface VirtualMachineTemplate extends ControlledEntity, Identity, Inte
all // all templates (only usable by admins)
}

@Override
State getState();

boolean isFeatured();

@@ -100,4 +132,10 @@ public interface VirtualMachineTemplate extends ControlledEntity, Identity, Inte
Map getDetails();

boolean isDynamicallyScalable();

long getUpdatedCount();

void incrUpdatedCount();

Date getUpdated();
}
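Later in this commit, TemplateServiceImpl obtains this machine via VirtualMachineTemplate.State.getStateMachine() and calls transitTo(...) with the template DAO, which the commit turns into a StateDao. A short sketch of that call pattern is below; template, templateDao and logger are assumed to come from the surrounding class, and only the states, events and transitTo(...) signature are taken from this diff.

// Sketch only: mirrors the usage added in TemplateServiceImpl later in this commit.
StateMachine2<VirtualMachineTemplate.State, VirtualMachineTemplate.Event, VirtualMachineTemplate> fsm =
        VirtualMachineTemplate.State.getStateMachine();
try {
    // NotUploaded --UploadRequested--> UploadInProgress (defined in the static block above)
    fsm.transitTo(template, VirtualMachineTemplate.Event.UploadRequested, null, templateDao);
} catch (NoTransitionException e) {
    // thrown when the template is in a state (for example Active) with no transition for the event
    logger.error("Unexpected state transition for template " + template.getName(), e);
}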
|
|||
|
|
@@ -0,0 +1,89 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.apache.cloudstack.api;
|
||||
|
||||
import org.apache.cloudstack.api.response.DomainResponse;
|
||||
import org.apache.cloudstack.api.response.GetUploadParamsResponse;
|
||||
import org.apache.cloudstack.api.response.ProjectResponse;
|
||||
import org.apache.cloudstack.api.response.ZoneResponse;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import java.net.URL;
|
||||
import java.util.UUID;
|
||||
|
||||
public abstract class AbstractGetUploadParamsCmd extends BaseCmd {
|
||||
|
||||
public static final Logger s_logger = Logger.getLogger(AbstractGetUploadParamsCmd.class.getName());
|
||||
|
||||
@Parameter(name = ApiConstants.NAME, type = CommandType.STRING, required = true, description = "the name of the volume/template")
|
||||
private String name;
|
||||
|
||||
@Parameter(name = ApiConstants.FORMAT, type = CommandType.STRING, required = true, description = "the format for the volume/template. Possible values include QCOW2, OVA, "
|
||||
+ "and VHD.")
|
||||
private String format;
|
||||
|
||||
@Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID, entityType = ZoneResponse.class, required = true, description = "the ID of the zone the volume/template is "
|
||||
+ "to be hosted on")
|
||||
private Long zoneId;
|
||||
|
||||
@Parameter(name = ApiConstants.CHECKSUM, type = CommandType.STRING, description = "the MD5 checksum value of this volume/template")
|
||||
private String checksum;
|
||||
|
||||
@Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, description = "an optional accountName. Must be used with domainId.")
|
||||
private String accountName;
|
||||
|
||||
@Parameter(name = ApiConstants.DOMAIN_ID, type = CommandType.UUID, entityType = DomainResponse.class, description = "an optional domainId. If the account parameter is used, "
|
||||
+ "domainId must also be used.")
|
||||
private Long domainId;
|
||||
|
||||
@Parameter(name = ApiConstants.PROJECT_ID, type = CommandType.UUID, entityType = ProjectResponse.class, description = "Upload volume/template for the project")
|
||||
private Long projectId;
|
||||
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
public String getFormat() {
|
||||
return format;
|
||||
}
|
||||
|
||||
public Long getZoneId() {
|
||||
return zoneId;
|
||||
}
|
||||
|
||||
public String getChecksum() {
|
||||
return checksum;
|
||||
}
|
||||
|
||||
public String getAccountName() {
|
||||
return accountName;
|
||||
}
|
||||
|
||||
public Long getDomainId() {
|
||||
return domainId;
|
||||
}
|
||||
|
||||
public Long getProjectId() {
|
||||
return projectId;
|
||||
}
|
||||
|
||||
public GetUploadParamsResponse createGetUploadParamsResponse(UUID id, URL postURL, String metadata, String timeout, String signature) {
|
||||
return new GetUploadParamsResponse(id, postURL, metadata, timeout, signature);
|
||||
}
|
||||
}
|
||||
|
|
@@ -191,6 +191,7 @@ public class ApiConstants {
public static final String PORTAL = "portal";
public static final String PORTABLE_IP_ADDRESS = "portableipaddress";
public static final String PORT_FORWARDING_SERVICE_ID = "portforwardingserviceid";
public static final String POST_URL = "postURL";
public static final String PRIVATE_INTERFACE = "privateinterface";
public static final String PRIVATE_IP = "privateip";
public static final String PRIVATE_PORT = "privateport";

@@ -616,6 +617,7 @@ public class ApiConstants {
public static final String REGION_LEVEL_VPC = "regionlevelvpc";
public static final String STRECHED_L2_SUBNET = "strechedl2subnet";
public static final String NETWORK_SPANNED_ZONES = "zonesnetworkspans";
public static final String METADATA = "metadata";
public static final String PHYSICAL_SIZE = "physicalsize";
public static final String OVM3_POOL = "ovm3pool";
public static final String OVM3_CLUSTER = "ovm3cluster";
|
|
|||
|
|
@@ -0,0 +1,183 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.apache.cloudstack.api.command.user.template;
|
||||
|
||||
import java.net.MalformedURLException;
|
||||
import java.util.Collection;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.cloudstack.api.APICommand;
|
||||
import org.apache.cloudstack.api.AbstractGetUploadParamsCmd;
|
||||
import org.apache.cloudstack.api.ApiConstants;
|
||||
import org.apache.cloudstack.api.ApiErrorCode;
|
||||
import org.apache.cloudstack.api.Parameter;
|
||||
import org.apache.cloudstack.api.ServerApiException;
|
||||
import org.apache.cloudstack.api.response.GetUploadParamsResponse;
|
||||
import org.apache.cloudstack.api.response.GuestOSResponse;
|
||||
import org.apache.cloudstack.context.CallContext;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.cloud.exception.ResourceAllocationException;
|
||||
|
||||
@APICommand(name = "getUploadParamsForTemplate", description = "upload an existing template into the CloudStack cloud. ", responseObject = GetUploadParamsResponse.class, since =
|
||||
"4.6.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
|
||||
public class GetUploadParamsForTemplateCmd extends AbstractGetUploadParamsCmd {
|
||||
public static final Logger s_logger = Logger.getLogger(GetUploadParamsForTemplateCmd.class.getName());
|
||||
|
||||
private static final String s_name = "postuploadtemplateresponse";
|
||||
|
||||
@Parameter(name = ApiConstants.DISPLAY_TEXT, type = CommandType.STRING, required = true, description = "the display text of the template. This is usually used for display purposes.", length = 4096)
|
||||
private String displayText;
|
||||
|
||||
@Parameter(name = ApiConstants.HYPERVISOR, type = CommandType.STRING, required = true, description = "the target hypervisor for the template")
|
||||
private String hypervisor;
|
||||
|
||||
@Parameter(name = ApiConstants.OS_TYPE_ID, type = CommandType.UUID, entityType = GuestOSResponse.class, required = true, description = "the ID of the OS Type that best represents the OS of this template.")
|
||||
private Long osTypeId;
|
||||
|
||||
@Parameter(name = ApiConstants.BITS, type = CommandType.INTEGER, description = "32 or 64 bits support. 64 by default")
|
||||
private Integer bits;
|
||||
|
||||
@Parameter(name = ApiConstants.DETAILS, type = CommandType.MAP, description = "Template details in key/value pairs.")
|
||||
private Map details;
|
||||
|
||||
@Parameter(name = ApiConstants.IS_DYNAMICALLY_SCALABLE, type = CommandType.BOOLEAN, description = "true if template contains XS/VMWare tools inorder to support dynamic scaling of VM cpu/memory")
|
||||
private Boolean isDynamicallyScalable;
|
||||
|
||||
@Parameter(name = ApiConstants.IS_EXTRACTABLE, type = CommandType.BOOLEAN, description = "true if the template or its derivatives are extractable; default is false")
|
||||
private Boolean extractable;
|
||||
|
||||
@Parameter(name = ApiConstants.IS_FEATURED, type = CommandType.BOOLEAN, description = "true if this template is a featured template, false otherwise")
|
||||
private Boolean featured;
|
||||
|
||||
@Parameter(name = ApiConstants.IS_PUBLIC, type = CommandType.BOOLEAN, description = "true if the template is available to all accounts; default is true")
|
||||
private Boolean publicTemplate;
|
||||
|
||||
@Parameter(name = ApiConstants.ROUTING, type = CommandType.BOOLEAN, description = "true if the template type is routing i.e., if template is used to deploy router")
|
||||
private Boolean isRoutingType;
|
||||
|
||||
@Parameter(name = ApiConstants.PASSWORD_ENABLED, type = CommandType.BOOLEAN, description = "true if the template supports the password reset feature; default is false")
|
||||
private Boolean passwordEnabled;
|
||||
|
||||
@Parameter(name = ApiConstants.REQUIRES_HVM, type = CommandType.BOOLEAN, description = "true if this template requires HVM")
|
||||
private Boolean requiresHvm;
|
||||
|
||||
@Parameter(name = ApiConstants.SSHKEY_ENABLED, type = CommandType.BOOLEAN, description = "true if the template supports the sshkey upload feature; default is false")
|
||||
private Boolean sshKeyEnabled;
|
||||
|
||||
@Parameter(name = ApiConstants.TEMPLATE_TAG, type = CommandType.STRING, description = "the tag for this template.")
|
||||
private String templateTag;
|
||||
|
||||
public String getDisplayText() {
|
||||
return displayText;
|
||||
}
|
||||
|
||||
public String getHypervisor() {
|
||||
return hypervisor;
|
||||
}
|
||||
|
||||
public Long getOsTypeId() {
|
||||
return osTypeId;
|
||||
}
|
||||
|
||||
public Integer getBits() {
|
||||
return bits;
|
||||
}
|
||||
|
||||
public Map getDetails() {
|
||||
if (details == null || details.isEmpty()) {
|
||||
return null;
|
||||
}
|
||||
Collection paramsCollection = details.values();
|
||||
Map params = (Map)(paramsCollection.toArray())[0];
|
||||
return params;
|
||||
}
|
||||
|
||||
public Boolean isDynamicallyScalable() {
|
||||
if (isDynamicallyScalable == null) {
|
||||
return Boolean.FALSE;
|
||||
}
|
||||
return isDynamicallyScalable;
|
||||
}
|
||||
|
||||
public Boolean isExtractable() {
|
||||
return extractable;
|
||||
}
|
||||
|
||||
public Boolean isFeatured() {
|
||||
return featured;
|
||||
}
|
||||
|
||||
public Boolean isPublic() {
|
||||
return publicTemplate;
|
||||
}
|
||||
|
||||
public Boolean isRoutingType() {
|
||||
return isRoutingType;
|
||||
}
|
||||
|
||||
public Boolean isPasswordEnabled() {
|
||||
return passwordEnabled;
|
||||
}
|
||||
|
||||
public Boolean getRequiresHvm() {
|
||||
return requiresHvm;
|
||||
}
|
||||
|
||||
public Boolean isSshKeyEnabled() {
|
||||
return sshKeyEnabled;
|
||||
}
|
||||
|
||||
public String getTemplateTag() {
|
||||
return templateTag;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void execute() throws ServerApiException {
|
||||
validateRequest();
|
||||
try {
|
||||
GetUploadParamsResponse response = _templateService.registerTemplateForPostUpload(this);
|
||||
response.setResponseName(getCommandName());
|
||||
setResponseObject(response);
|
||||
} catch (ResourceAllocationException | MalformedURLException e) {
|
||||
s_logger.error("exception while registering template", e);
|
||||
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "exception while registering template: " + e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
private void validateRequest() {
|
||||
if (getZoneId() <= 0) {
|
||||
throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "invalid zoneid");
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getCommandName() {
|
||||
return s_name;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getEntityOwnerId() {
|
||||
Long accountId = _accountService.finalyzeAccountId(getAccountName(), getDomainId(), getProjectId(), true);
|
||||
if (accountId == null) {
|
||||
return CallContext.current().getCallingAccount().getId();
|
||||
}
|
||||
return accountId;
|
||||
}
|
||||
}
|
||||
|
|
@@ -0,0 +1,83 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.apache.cloudstack.api.command.user.volume;
|
||||
|
||||
import java.net.MalformedURLException;
|
||||
|
||||
import com.cloud.exception.ResourceAllocationException;
|
||||
import org.apache.cloudstack.api.APICommand;
|
||||
import org.apache.cloudstack.api.AbstractGetUploadParamsCmd;
|
||||
import org.apache.cloudstack.api.ApiConstants;
|
||||
import org.apache.cloudstack.api.ApiErrorCode;
|
||||
import org.apache.cloudstack.api.Parameter;
|
||||
import org.apache.cloudstack.api.ServerApiException;
|
||||
import org.apache.cloudstack.api.response.DiskOfferingResponse;
|
||||
import org.apache.cloudstack.api.response.GetUploadParamsResponse;
|
||||
import org.apache.cloudstack.context.CallContext;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
@APICommand(name = "getUploadParamsForVolume", description = "Upload a data disk to the cloudstack cloud.", responseObject = GetUploadParamsResponse.class, since = "4.6.0",
|
||||
requestHasSensitiveInfo= false, responseHasSensitiveInfo = false)
|
||||
public class GetUploadParamsForVolumeCmd extends AbstractGetUploadParamsCmd {
|
||||
public static final Logger s_logger = Logger.getLogger(GetUploadParamsForVolumeCmd.class.getName());
|
||||
|
||||
private static final String s_name = "postuploadvolumeresponse";
|
||||
|
||||
@Parameter(name = ApiConstants.IMAGE_STORE_UUID, type = CommandType.STRING, description = "Image store uuid")
|
||||
private String imageStoreUuid;
|
||||
|
||||
@Parameter(name = ApiConstants.DISK_OFFERING_ID, required = false, type = CommandType.UUID, entityType = DiskOfferingResponse.class, description = "the ID of the disk "
|
||||
+ "offering. This must be a custom sized offering since during upload of volume/template size is unknown.")
|
||||
private Long diskOfferingId;
|
||||
|
||||
public String getImageStoreUuid() {
|
||||
return imageStoreUuid;
|
||||
}
|
||||
|
||||
public Long getDiskOfferingId() {
|
||||
return diskOfferingId;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void execute() throws ServerApiException {
|
||||
|
||||
try {
|
||||
GetUploadParamsResponse response = _volumeService.uploadVolume(this);
|
||||
response.setResponseName(getCommandName());
|
||||
setResponseObject(response);
|
||||
} catch (MalformedURLException | ResourceAllocationException e) {
|
||||
s_logger.error("exception while uploading volume", e);
|
||||
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "exception while uploading a volume: " + e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getCommandName() {
|
||||
return s_name;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getEntityOwnerId() {
|
||||
Long accountId = _accountService.finalyzeAccountId(getAccountName(), getDomainId(), getProjectId(), true);
|
||||
if (accountId == null) {
|
||||
return CallContext.current().getCallingAccount().getId();
|
||||
}
|
||||
return accountId;
|
||||
}
|
||||
}
|
||||
|
|
@@ -0,0 +1,84 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.apache.cloudstack.api.response;
|
||||
|
||||
import java.net.URL;
|
||||
import java.util.UUID;
|
||||
|
||||
import org.apache.cloudstack.api.ApiConstants;
|
||||
import org.apache.cloudstack.api.BaseResponse;
|
||||
|
||||
import com.cloud.serializer.Param;
|
||||
import com.google.gson.annotations.SerializedName;
|
||||
|
||||
public class GetUploadParamsResponse extends BaseResponse {
|
||||
|
||||
@SerializedName(ApiConstants.ID)
|
||||
@Param(description = "the template/volume ID")
|
||||
private UUID id;
|
||||
|
||||
@SerializedName(ApiConstants.POST_URL)
|
||||
@Param(description = "POST url to upload the file to")
|
||||
private URL postURL;
|
||||
|
||||
@SerializedName(ApiConstants.METADATA)
|
||||
@Param(description = "encrypted data to be sent in the POST request.")
|
||||
private String metadata;
|
||||
|
||||
@SerializedName(ApiConstants.EXPIRES)
|
||||
@Param(description = "the timestamp after which the signature expires")
|
||||
private String expires;
|
||||
|
||||
@SerializedName(ApiConstants.SIGNATURE)
|
||||
@Param(description = "signature to be sent in the POST request.")
|
||||
private String signature;
|
||||
|
||||
public GetUploadParamsResponse(UUID id, URL postURL, String metadata, String expires, String signature) {
|
||||
this.id = id;
|
||||
this.postURL = postURL;
|
||||
this.metadata = metadata;
|
||||
this.expires = expires;
|
||||
this.signature = signature;
|
||||
setObjectName("getuploadparams");
|
||||
}
|
||||
|
||||
public GetUploadParamsResponse() {
|
||||
setObjectName("getuploadparams");
|
||||
}
|
||||
|
||||
public void setId(UUID id) {
|
||||
this.id = id;
|
||||
}
|
||||
|
||||
public void setPostURL(URL postURL) {
|
||||
this.postURL = postURL;
|
||||
}
|
||||
|
||||
public void setMetadata(String metadata) {
|
||||
this.metadata = metadata;
|
||||
}
|
||||
|
||||
public void setTimeout(String expires) {
|
||||
this.expires = expires;
|
||||
}
|
||||
|
||||
public void setSignature(String signature) {
|
||||
this.signature = signature;
|
||||
}
|
||||
}
|
||||
|
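GetUploadParamsResponse (defined above) is the payload both new API commands return: the entity id, the POST URL, the encrypted metadata, the expiry timestamp and the signature, serialized under the names added to ApiConstants (id, postURL, metadata, expires, signature). An illustrative construction follows, with placeholder values throughout; only the constructor order and setResponseName(...) usage come from this diff.

// Illustrative only (not part of the commit): every value below is a placeholder.
static GetUploadParamsResponse sampleResponse() throws MalformedURLException {
    URL postUrl = new URL("https://192.0.2.10/upload/0857d3a2-8d0d-4d4b-9078-b0c4d4f8f3f2");
    GetUploadParamsResponse resp = new GetUploadParamsResponse(
            UUID.randomUUID(),        // id: the freshly created volume/template row
            postUrl,                  // postURL: where the browser POSTs the file
            "<encrypted-metadata>",   // metadata: encrypted blob the SSVM validates before accepting the file
            "<expires-timestamp>",    // expires: moment after which the signature is no longer honoured
            "<signature>");           // signature over the upload parameters
    resp.setResponseName("postuploadvolumeresponse"); // response name used by getUploadParamsForVolume
    return resp;
}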
@@ -779,3 +779,7 @@ listOpenDaylightControllers=1

### GloboDNS commands
addGloboDnsHost=1

### volume/template post upload
getUploadParamsForVolume=15
getUploadParamsForTemplate=15

core/pom.xml
@@ -55,21 +55,7 @@
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-pmd-plugin</artifactId>
</plugin>
<plugin>
<groupId>com.mycila</groupId>
<artifactId>license-maven-plugin</artifactId>
<executions>
<execution>
<id>cloudstack-checklicence</id>
<phase>process-classes</phase>
<goals>
<goal>check</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>

</build>

</project>
@@ -27,6 +27,7 @@ public class SecStorageSetupCommand extends Command {
private DataStoreTO store;
private String secUrl;
private KeystoreManager.Certificates certs;
private String postUploadKey;


public SecStorageSetupCommand() {

@@ -66,4 +67,11 @@ public class SecStorageSetupCommand extends Command {
this.store = store;
}

public String getPostUploadKey() {
return postUploadKey;
}

public void setPostUploadKey(String postUploadKey) {
this.postUploadKey = postUploadKey;
}
}
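The new postUploadKey travels to the SSVM inside the existing SecStorageSetupCommand, giving the SSVM a shared secret with which to validate the metadata and signature of direct POST uploads. A minimal sender-side sketch is below; only getPostUploadKey/setPostUploadKey come from this diff, and where the key itself is generated or stored is an assumption.

// Hypothetical sender-side fragment: the rest of the setup command is populated as before.
SecStorageSetupCommand setupCmd = new SecStorageSetupCommand();
setupCmd.setPostUploadKey(postUploadKey); // shared secret the SSVM uses to validate direct POST uploads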
@@ -28,6 +28,7 @@ import java.net.URISyntaxException;
import java.net.URL;
import java.util.Date;

import org.apache.cloudstack.utils.imagestore.ImageStoreUtil;
import org.apache.commons.httpclient.Credentials;
import org.apache.commons.httpclient.Header;
import org.apache.commons.httpclient.HttpClient;

@@ -45,7 +46,6 @@ import org.apache.log4j.Logger;

import org.apache.cloudstack.managed.context.ManagedContextRunnable;
import org.apache.cloudstack.storage.command.DownloadCommand.ResourceType;
import org.apache.cloudstack.utils.template.TemplateUtils;

import com.cloud.agent.api.storage.Proxy;
import com.cloud.storage.StorageLayer;

@@ -259,7 +259,7 @@ public class HttpTemplateDownloader extends ManagedContextRunnable implements Te
} catch (URISyntaxException e) {
s_logger.warn("Invalid download url: " + getDownloadUrl() + ", This should not happen since we have validated the url before!!");
}
String unsupportedFormat = TemplateUtils.checkTemplateFormat(file.getAbsolutePath(), uripath);
String unsupportedFormat = ImageStoreUtil.checkTemplateFormat(file.getAbsolutePath(), uripath);
if (unsupportedFormat == null || !unsupportedFormat.isEmpty()) {
try {
request.abort();
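The only behavioural change in this hunk is that the format check now lives in ImageStoreUtil rather than TemplateUtils, so the same validation can be shared with the new upload path. The guard reads a little backwards, so here it is restated in isolation, with file and uripath as in the method above:

// Restates the guard used above: a non-null empty return value is the "format looks fine" case,
// while null or non-empty text means the downloaded file is not a supported template format.
String unsupportedFormat = ImageStoreUtil.checkTemplateFormat(file.getAbsolutePath(), uripath);
if (unsupportedFormat == null || !unsupportedFormat.isEmpty()) {
    // treat the download as invalid and abort, exactly as HttpTemplateDownloader does above
}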
|
|||
|
|
@@ -0,0 +1,199 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.apache.cloudstack.storage.command;
|
||||
|
||||
public class TemplateOrVolumePostUploadCommand {
|
||||
|
||||
long entityId;
|
||||
|
||||
String entityUUID;
|
||||
|
||||
String absolutePath;
|
||||
|
||||
String checksum;
|
||||
|
||||
String type;
|
||||
|
||||
String name;
|
||||
|
||||
String localPath;
|
||||
|
||||
boolean requiresHvm;
|
||||
|
||||
String imageFormat;
|
||||
|
||||
String dataTo;
|
||||
|
||||
String dataToRole;
|
||||
|
||||
String remoteEndPoint;
|
||||
|
||||
String maxUploadSize;
|
||||
|
||||
String description;
|
||||
|
||||
private String defaultMaxAccountSecondaryStorage;
|
||||
|
||||
private long accountId;
|
||||
|
||||
public TemplateOrVolumePostUploadCommand(long entityId, String entityUUID, String absolutePath, String checksum, String type, String name, String imageFormat, String dataTo,
|
||||
String dataToRole) {
|
||||
this.entityId = entityId;
|
||||
this.entityUUID = entityUUID;
|
||||
this.absolutePath = absolutePath;
|
||||
this.checksum = checksum;
|
||||
this.type = type;
|
||||
this.name = name;
|
||||
this.imageFormat = imageFormat;
|
||||
this.dataTo = dataTo;
|
||||
this.dataToRole = dataToRole;
|
||||
}
|
||||
|
||||
public TemplateOrVolumePostUploadCommand() {
|
||||
}
|
||||
|
||||
public String getRemoteEndPoint() {
|
||||
return remoteEndPoint;
|
||||
}
|
||||
|
||||
public void setRemoteEndPoint(String remoteEndPoint) {
|
||||
this.remoteEndPoint = remoteEndPoint;
|
||||
}
|
||||
|
||||
public String getDataTo() {
|
||||
return dataTo;
|
||||
}
|
||||
|
||||
public void setDataTo(String dataTo) {
|
||||
this.dataTo = dataTo;
|
||||
}
|
||||
|
||||
public String getDataToRole() {
|
||||
return dataToRole;
|
||||
}
|
||||
|
||||
public void setDataToRole(String dataToRole) {
|
||||
this.dataToRole = dataToRole;
|
||||
}
|
||||
|
||||
public String getLocalPath() {
|
||||
return localPath;
|
||||
}
|
||||
|
||||
public void setLocalPath(String localPath) {
|
||||
this.localPath = localPath;
|
||||
}
|
||||
|
||||
public boolean getRequiresHvm() {
|
||||
return requiresHvm;
|
||||
}
|
||||
|
||||
public void setRequiresHvm(boolean requiresHvm) {
|
||||
this.requiresHvm = requiresHvm;
|
||||
}
|
||||
|
||||
public String getImageFormat() {
|
||||
return imageFormat;
|
||||
}
|
||||
|
||||
public void setImageFormat(String imageFormat) {
|
||||
this.imageFormat = imageFormat;
|
||||
}
|
||||
|
||||
public long getEntityId() {
|
||||
return entityId;
|
||||
}
|
||||
|
||||
public void setEntityId(long entityId) {
|
||||
this.entityId = entityId;
|
||||
}
|
||||
|
||||
public String getEntityUUID() {
|
||||
return entityUUID;
|
||||
}
|
||||
|
||||
public void setEntityUUID(String entityUUID) {
|
||||
this.entityUUID = entityUUID;
|
||||
}
|
||||
|
||||
public String getAbsolutePath() {
|
||||
return absolutePath;
|
||||
}
|
||||
|
||||
public void setAbsolutePath(String absolutePath) {
|
||||
this.absolutePath = absolutePath;
|
||||
}
|
||||
|
||||
public String getChecksum() {
|
||||
return checksum;
|
||||
}
|
||||
|
||||
public void setChecksum(String checksum) {
|
||||
this.checksum = checksum;
|
||||
}
|
||||
|
||||
public String getType() {
|
||||
return type;
|
||||
}
|
||||
|
||||
public void setType(String type) {
|
||||
this.type = type;
|
||||
}
|
||||
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
public void setName(String name) {
|
||||
this.name = name;
|
||||
}
|
||||
|
||||
public String getMaxUploadSize() {
|
||||
return maxUploadSize;
|
||||
}
|
||||
|
||||
public void setMaxUploadSize(String maxUploadSize) {
|
||||
this.maxUploadSize = maxUploadSize;
|
||||
}
|
||||
|
||||
public String getDescription() {
|
||||
return description;
|
||||
}
|
||||
|
||||
public void setDescription(String description) {
|
||||
this.description = description;
|
||||
}
|
||||
|
||||
public void setDefaultMaxAccountSecondaryStorage(String defaultMaxAccountSecondaryStorage) {
|
||||
this.defaultMaxAccountSecondaryStorage = defaultMaxAccountSecondaryStorage;
|
||||
}
|
||||
|
||||
public String getDefaultMaxAccountSecondaryStorage() {
|
||||
return defaultMaxAccountSecondaryStorage;
|
||||
}
|
||||
|
||||
public void setAccountId(long accountId) {
|
||||
this.accountId = accountId;
|
||||
}
|
||||
|
||||
public long getAccountId() {
|
||||
return accountId;
|
||||
}
|
||||
}
|
||||
|
|
@@ -0,0 +1,88 @@
|
|||
//
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
//
|
||||
|
||||
package org.apache.cloudstack.storage.command;
|
||||
|
||||
import com.cloud.agent.api.Answer;
|
||||
|
||||
public class UploadStatusAnswer extends Answer {
|
||||
public static enum UploadStatus {
|
||||
UNKNOWN, IN_PROGRESS, COMPLETED, ERROR
|
||||
}
|
||||
|
||||
private UploadStatus status;
|
||||
private long virtualSize = 0;
|
||||
private long physicalSize = 0;
|
||||
private String installPath = null;
|
||||
private int downloadPercent = 0;
|
||||
|
||||
protected UploadStatusAnswer() {
|
||||
}
|
||||
|
||||
public UploadStatusAnswer(UploadStatusCommand cmd, UploadStatus status, String msg) {
|
||||
super(cmd, false, msg);
|
||||
this.status = status;
|
||||
}
|
||||
|
||||
public UploadStatusAnswer(UploadStatusCommand cmd, Exception e) {
|
||||
super(cmd, false, e.getMessage());
|
||||
this.status = UploadStatus.ERROR;
|
||||
}
|
||||
|
||||
public UploadStatusAnswer(UploadStatusCommand cmd, UploadStatus status) {
|
||||
super(cmd, true, null);
|
||||
this.status = status;
|
||||
}
|
||||
|
||||
public UploadStatus getStatus() {
|
||||
return status;
|
||||
}
|
||||
|
||||
public long getVirtualSize() {
|
||||
return virtualSize;
|
||||
}
|
||||
|
||||
public void setVirtualSize(long virtualSize) {
|
||||
this.virtualSize = virtualSize;
|
||||
}
|
||||
|
||||
public long getPhysicalSize() {
|
||||
return physicalSize;
|
||||
}
|
||||
|
||||
public void setPhysicalSize(long physicalSize) {
|
||||
this.physicalSize = physicalSize;
|
||||
}
|
||||
|
||||
public String getInstallPath() {
|
||||
return installPath;
|
||||
}
|
||||
|
||||
public void setInstallPath(String installPath) {
|
||||
this.installPath = installPath;
|
||||
}
|
||||
|
||||
public int getDownloadPercent() {
|
||||
return downloadPercent;
|
||||
}
|
||||
|
||||
public void setDownloadPercent(int downloadPercent) {
|
||||
this.downloadPercent = downloadPercent;
|
||||
}
|
||||
}
|
||||
|
|
@@ -0,0 +1,53 @@
|
|||
//
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
//
|
||||
|
||||
package org.apache.cloudstack.storage.command;
|
||||
|
||||
import com.cloud.agent.api.Command;
|
||||
|
||||
public class UploadStatusCommand extends Command {
|
||||
public enum EntityType {
|
||||
Volume,
|
||||
Template
|
||||
}
|
||||
private String entityUuid;
|
||||
private EntityType entityType;
|
||||
|
||||
protected UploadStatusCommand() {
|
||||
}
|
||||
|
||||
public UploadStatusCommand(String entityUuid, EntityType entityType) {
|
||||
this.entityUuid = entityUuid;
|
||||
this.entityType = entityType;
|
||||
}
|
||||
|
||||
public String getEntityUuid() {
|
||||
return entityUuid;
|
||||
}
|
||||
|
||||
public EntityType getEntityType() {
|
||||
return entityType;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean executeInSequence() {
|
||||
return false;
|
||||
}
|
||||
|
||||
}
|
||||
|
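The UploadStatusCommand/UploadStatusAnswer pair above is what lets the management server ask an SSVM how far a direct upload has progressed. A hedged polling sketch follows; EndPoint.sendMessage(...) is the existing agent call path, and the surrounding scheduling, retry and timeout handling are assumptions.

// Hypothetical monitor fragment, not part of this commit.
UploadStatusCommand cmd = new UploadStatusCommand(volume.getUuid(), UploadStatusCommand.EntityType.Volume);
UploadStatusAnswer answer = (UploadStatusAnswer) endPoint.sendMessage(cmd);
switch (answer.getStatus()) {
    case COMPLETED:
        // record install path and sizes, then move the entity to Uploaded via the state machine
        break;
    case ERROR:
        // move the entity to UploadError
        break;
    case IN_PROGRESS:
    case UNKNOWN:
        // keep polling until an operation timeout pushes NotUploaded/UploadInProgress to a failure state
        break;
}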
@@ -20,6 +20,7 @@ package org.apache.cloudstack.engine.subsystem.api.storage;

import java.util.Map;

import com.cloud.utils.Pair;
import org.apache.cloudstack.engine.cloud.entity.api.VolumeEntity;
import org.apache.cloudstack.framework.async.AsyncCallFuture;
import org.apache.cloudstack.storage.command.CommandResult;

@@ -96,6 +97,8 @@ public interface VolumeService {

AsyncCallFuture<VolumeApiResult> registerVolume(VolumeInfo volume, DataStore store);

public Pair<EndPoint,DataObject> registerVolumeForPostUpload(VolumeInfo volume, DataStore store);

AsyncCallFuture<VolumeApiResult> resize(VolumeInfo volume);

void resizeVolumeOnHypervisor(long volumeId, long newSize, long destHostId, String instanceName);
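registerVolumeForPostUpload returns the image-store endpoint and the DataObject placeholder for the volume, which is exactly what is needed to fill a TemplateOrVolumePostUploadCommand (introduced earlier in this diff) before it is serialized into the metadata handed back to the client. A rough sketch follows; the field values, the "VOLUME" type string and the serialization/encryption step are all assumptions, only the constructor and setters come from the new command class.

// Hypothetical fragment: builds the payload describing where and how the SSVM should store the file.
TemplateOrVolumePostUploadCommand payload = new TemplateOrVolumePostUploadCommand(
        volume.getId(), volume.getUuid(), volumeStorePath, checksum,
        "VOLUME", volume.getName(), imageFormat, dataStoreUrl, dataStoreRole);
payload.setRemoteEndPoint(ssvmAddress);          // SSVM that will receive the POST
payload.setMaxUploadSize(maxUploadSizeStr);      // enforced by the SSVM during the upload
payload.setAccountId(volume.getAccountId());
payload.setDescription("post-upload payload for volume " + volume.getName());
// the command is then serialized and encrypted into the "metadata" string of GetUploadParamsResponse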
@@ -582,10 +582,12 @@ public class VMTemplateVO implements VirtualMachineTemplate {
return size;
}

@Override
public long getUpdatedCount() {
return updatedCount;
}

@Override
public void incrUpdatedCount() {
updatedCount++;
}

@@ -594,6 +596,7 @@ public class VMTemplateVO implements VirtualMachineTemplate {
updatedCount--;
}

@Override
public Date getUpdated() {
return updated;
}
@@ -21,12 +21,14 @@ import java.util.Map;

import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.storage.VMTemplateVO;
import com.cloud.template.VirtualMachineTemplate;
import com.cloud.utils.db.GenericDao;
import com.cloud.utils.fsm.StateDao;

/*
* Data Access Object for vm_templates table
*/
public interface VMTemplateDao extends GenericDao<VMTemplateVO, Long> {
public interface VMTemplateDao extends GenericDao<VMTemplateVO, Long>, StateDao<VirtualMachineTemplate.State, VirtualMachineTemplate.Event, VirtualMachineTemplate> {

public List<VMTemplateVO> listByPublic();
@@ -59,6 +59,7 @@ import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Func;
import com.cloud.utils.db.TransactionLegacy;
import com.cloud.utils.db.UpdateBuilder;
import com.cloud.utils.exception.CloudRuntimeException;

@Component

@@ -104,6 +105,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem
private SearchBuilder<VMTemplateVO> UserIsoSearch;
private GenericSearchBuilder<VMTemplateVO, Long> CountTemplatesByAccount;
// private SearchBuilder<VMTemplateVO> updateStateSearch;
private SearchBuilder<VMTemplateVO> AllFieldsSearch;

@Inject
ResourceTagDao _tagsDao;

@@ -393,6 +395,16 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem
// updateStateSearch.and("updatedCount", updateStateSearch.entity().getUpdatedCount(), Op.EQ);
// updateStateSearch.done();

AllFieldsSearch = createSearchBuilder();
AllFieldsSearch.and("state", AllFieldsSearch.entity().getState(), SearchCriteria.Op.EQ);
AllFieldsSearch.and("accountId", AllFieldsSearch.entity().getAccountId(), SearchCriteria.Op.EQ);
AllFieldsSearch.and("id", AllFieldsSearch.entity().getId(), SearchCriteria.Op.EQ);
AllFieldsSearch.and("destroyed", AllFieldsSearch.entity().getState(), SearchCriteria.Op.EQ);
AllFieldsSearch.and("notDestroyed", AllFieldsSearch.entity().getState(), SearchCriteria.Op.NEQ);
AllFieldsSearch.and("updatedCount", AllFieldsSearch.entity().getUpdatedCount(), SearchCriteria.Op.EQ);
AllFieldsSearch.and("name", AllFieldsSearch.entity().getName(), SearchCriteria.Op.EQ);
AllFieldsSearch.done();

return result;
}
@@ -1000,4 +1012,64 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem
|
|||
* return templateZonePairList; }
|
||||
*/
|
||||
|
||||
@Override
|
||||
public boolean updateState(
|
||||
com.cloud.template.VirtualMachineTemplate.State currentState,
|
||||
com.cloud.template.VirtualMachineTemplate.Event event,
|
||||
com.cloud.template.VirtualMachineTemplate.State nextState,
|
||||
VirtualMachineTemplate vo, Object data) {
|
||||
|
||||
Long oldUpdated = vo.getUpdatedCount();
|
||||
Date oldUpdatedTime = vo.getUpdated();
|
||||
|
||||
SearchCriteria<VMTemplateVO> sc = AllFieldsSearch.create();
|
||||
sc.setParameters("id", vo.getId());
|
||||
sc.setParameters("state", currentState);
|
||||
sc.setParameters("updatedCount", vo.getUpdatedCount());
|
||||
|
||||
vo.incrUpdatedCount();
|
||||
|
||||
UpdateBuilder builder = getUpdateBuilder(vo);
|
||||
builder.set(vo, "state", nextState);
|
||||
builder.set(vo, "updated", new Date());
|
||||
|
||||
int rows = update((VMTemplateVO)vo, sc);
|
||||
if (rows == 0 && s_logger.isDebugEnabled()) {
|
||||
VMTemplateVO dbTemplate = findByIdIncludingRemoved(vo.getId());
|
||||
if (dbTemplate != null) {
|
||||
StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString());
|
||||
str.append(": DB Data={id=")
|
||||
.append(dbTemplate.getId())
|
||||
.append("; state=")
|
||||
.append(dbTemplate.getState())
|
||||
.append("; updatecount=")
|
||||
.append(dbTemplate.getUpdatedCount())
|
||||
.append(";updatedTime=")
|
||||
.append(dbTemplate.getUpdated());
|
||||
str.append(": New Data={id=")
|
||||
.append(vo.getId())
|
||||
.append("; state=")
|
||||
.append(nextState)
|
||||
.append("; event=")
|
||||
.append(event)
|
||||
.append("; updatecount=")
|
||||
.append(vo.getUpdatedCount())
|
||||
.append("; updatedTime=")
|
||||
.append(vo.getUpdated());
|
||||
str.append(": stale Data={id=")
|
||||
.append(vo.getId())
|
||||
.append("; state=")
|
||||
.append(currentState)
|
||||
.append("; event=")
|
||||
.append(event)
|
||||
.append("; updatecount=")
|
||||
.append(oldUpdated)
|
||||
.append("; updatedTime=")
|
||||
.append(oldUpdatedTime);
|
||||
} else {
|
||||
s_logger.debug("Unable to update template: id=" + vo.getId() + ", as no such template exists in the database anymore");
|
||||
}
|
||||
}
|
||||
return rows > 0;
|
||||
}
|
||||
}
|
||||
|
@@ -25,6 +25,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreState
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.VMTemplateStorageResourceAssoc;
import com.cloud.storage.VMTemplateStorageResourceAssoc.Status;
import com.cloud.template.VirtualMachineTemplate;
import com.cloud.utils.db.GenericDao;
import com.cloud.utils.fsm.StateDao;

@@ -80,4 +81,6 @@ public interface TemplateDataStoreDao extends GenericDao<TemplateDataStoreVO, Lo
void removeByTemplateStore(long templateId, long imageStoreId);

void expireDnldUrlsForZone(Long dcId);

List<TemplateDataStoreVO> listByTemplateState(VirtualMachineTemplate.State... states);
}
@@ -21,6 +21,7 @@ import java.util.List;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;

import com.cloud.storage.Volume;
import com.cloud.utils.db.GenericDao;
import com.cloud.utils.fsm.StateDao;

@@ -48,4 +49,6 @@ public interface VolumeDataStoreDao extends GenericDao<VolumeDataStoreVO, Long>,
void expireDnldUrlsForZone(Long dcId);

List<VolumeDataStoreVO> listUploadedVolumesByStoreId(long id);

List<VolumeDataStoreVO> listByVolumeState(Volume.State... states);
}
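The new listByTemplateState and listByVolumeState finders make in-flight uploads queryable by entity state. A hypothetical monitor task could use them as sketched below to pick up every upload that is still pending and either poll it (see the UploadStatusCommand sketch above) or let the OperationTimeout event push it to an abandoned or error state; the scheduling and timeout policy are assumptions.

// Hypothetical cleanup/monitor fragment, not part of this commit.
List<VolumeDataStoreVO> pendingVolumeUploads =
        volumeDataStoreDao.listByVolumeState(Volume.State.NotUploaded, Volume.State.UploadInProgress);
for (VolumeDataStoreVO entry : pendingVolumeUploads) {
    // poll the SSVM for this entry, or fire Volume.Event.OperationTimeout when it is too old
}
List<TemplateDataStoreVO> pendingTemplateUploads =
        templateDataStoreDao.listByTemplateState(VirtualMachineTemplate.State.NotUploaded,
                VirtualMachineTemplate.State.UploadInProgress);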
@@ -29,7 +29,6 @@ import javax.inject.Inject;
|
|||
|
||||
import org.apache.log4j.Logger;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionService;
|
||||
|
|
@@ -87,12 +86,15 @@ import com.cloud.storage.dao.VMTemplateZoneDao;
|
|||
import com.cloud.storage.template.TemplateConstants;
|
||||
import com.cloud.storage.template.TemplateProp;
|
||||
import com.cloud.template.TemplateManager;
|
||||
import com.cloud.template.VirtualMachineTemplate;
|
||||
import com.cloud.user.Account;
|
||||
import com.cloud.user.AccountManager;
|
||||
import com.cloud.user.ResourceLimitService;
|
||||
import com.cloud.utils.UriUtils;
|
||||
import com.cloud.utils.db.GlobalLock;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.cloud.utils.fsm.NoTransitionException;
|
||||
import com.cloud.utils.fsm.StateMachine2;
|
||||
|
||||
@Component
|
||||
public class TemplateServiceImpl implements TemplateService {
|
||||
|
|
@@ -315,6 +317,7 @@ public class TemplateServiceImpl implements TemplateService {
|
|||
|
||||
toBeDownloaded.addAll(allTemplates);
|
||||
|
||||
final StateMachine2<VirtualMachineTemplate.State, VirtualMachineTemplate.Event, VirtualMachineTemplate> stateMachine = VirtualMachineTemplate.State.getStateMachine();
|
||||
for (VMTemplateVO tmplt : allTemplates) {
|
||||
String uniqueName = tmplt.getUniqueName();
|
||||
TemplateDataStoreVO tmpltStore = _vmTemplateStoreDao.findByStoreTemplate(storeId, tmplt.getId());
|
||||
|
|
@@ -330,18 +333,23 @@ public class TemplateServiceImpl implements TemplateService {
|
|||
tmpltStore.setDownloadState(Status.DOWNLOAD_ERROR);
|
||||
String msg = "Template " + tmplt.getName() + ":" + tmplt.getId() + " is corrupted on secondary storage " + tmpltStore.getId();
|
||||
tmpltStore.setErrorString(msg);
|
||||
s_logger.info("msg");
|
||||
if (tmplt.getUrl() == null) {
|
||||
msg =
|
||||
"Private Template (" + tmplt + ") with install path " + tmpltInfo.getInstallPath() +
|
||||
"is corrupted, please check in image store: " + tmpltStore.getDataStoreId();
|
||||
s_logger.info(msg);
|
||||
if (tmplt.getState() == VirtualMachineTemplate.State.NotUploaded || tmplt.getState() == VirtualMachineTemplate.State.UploadInProgress) {
|
||||
s_logger.info("Template Sync found " + uniqueName + " on image store " + storeId + " uploaded using SSVM as corrupted, marking it as failed");
|
||||
tmpltStore.setState(State.Failed);
|
||||
try {
|
||||
stateMachine.transitTo(tmplt, VirtualMachineTemplate.Event.OperationFailed, null, _templateDao);
|
||||
} catch (NoTransitionException e) {
|
||||
s_logger.error("Unexpected state transition exception for template " + tmplt.getName() + ". Details: " + e.getMessage());
|
||||
}
|
||||
} else if (tmplt.getUrl() == null) {
|
||||
msg = "Private template (" + tmplt + ") with install path " + tmpltInfo.getInstallPath() + " is corrupted, please check in image store: " + tmpltStore.getDataStoreId();
|
||||
s_logger.warn(msg);
|
||||
} else {
|
||||
s_logger.info("Removing template_store_ref entry for corrupted template " + tmplt.getName());
|
||||
_vmTemplateStoreDao.remove(tmpltStore.getId());
|
||||
toBeDownloaded.add(tmplt);
|
||||
}
|
||||
|
||||
} else {
|
||||
tmpltStore.setDownloadPercent(100);
|
||||
tmpltStore.setDownloadState(Status.DOWNLOADED);
|
||||
|
|
@@ -355,6 +363,14 @@ public class TemplateServiceImpl implements TemplateService {
|
|||
tmlpt.setSize(tmpltInfo.getSize());
|
||||
_templateDao.update(tmplt.getId(), tmlpt);
|
||||
|
||||
if (tmplt.getState() == VirtualMachineTemplate.State.NotUploaded || tmplt.getState() == VirtualMachineTemplate.State.UploadInProgress) {
|
||||
try {
|
||||
stateMachine.transitTo(tmplt, VirtualMachineTemplate.Event.OperationSucceeded, null, _templateDao);
|
||||
} catch (NoTransitionException e) {
|
||||
s_logger.error("Unexpected state transition exception for template " + tmplt.getName() + ". Details: " + e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
// Skipping limit checks for SYSTEM Account and for the templates created from volumes or snapshots
|
||||
// which already got checked and incremented during createTemplate API call.
|
||||
if (tmpltInfo.getSize() > 0 && tmplt.getAccountId() != Account.ACCOUNT_ID_SYSTEM && tmplt.getUrl() != null) {
|
||||
|
|
@@ -374,9 +390,7 @@ public class TemplateServiceImpl implements TemplateService {
|
|||
}
|
||||
_vmTemplateStoreDao.update(tmpltStore.getId(), tmpltStore);
|
||||
} else {
|
||||
tmpltStore =
|
||||
new TemplateDataStoreVO(storeId, tmplt.getId(), new Date(), 100, Status.DOWNLOADED, null, null, null, tmpltInfo.getInstallPath(),
|
||||
tmplt.getUrl());
|
||||
tmpltStore = new TemplateDataStoreVO(storeId, tmplt.getId(), new Date(), 100, Status.DOWNLOADED, null, null, null, tmpltInfo.getInstallPath(), tmplt.getUrl());
|
||||
tmpltStore.setSize(tmpltInfo.getSize());
|
||||
tmpltStore.setPhysicalSize(tmpltInfo.getPhysicalSize());
|
||||
tmpltStore.setDataStoreRole(store.getRole());
|
||||
|
|
@@ -387,18 +401,28 @@ public class TemplateServiceImpl implements TemplateService {
|
|||
tmlpt.setSize(tmpltInfo.getSize());
|
||||
_templateDao.update(tmplt.getId(), tmlpt);
|
||||
associateTemplateToZone(tmplt.getId(), zoneId);
|
||||
|
||||
}
|
||||
} else if (tmplt.getState() == VirtualMachineTemplate.State.NotUploaded || tmplt.getState() == VirtualMachineTemplate.State.UploadInProgress) {
|
||||
s_logger.info("Template Sync did not find " + uniqueName + " on image store " + storeId + " uploaded using SSVM, marking it as failed");
|
||||
toBeDownloaded.remove(tmplt);
|
||||
tmpltStore.setDownloadState(Status.DOWNLOAD_ERROR);
|
||||
String msg = "Template " + tmplt.getName() + ":" + tmplt.getId() + " is corrupted on secondary storage " + tmpltStore.getId();
|
||||
tmpltStore.setErrorString(msg);
|
||||
tmpltStore.setState(State.Failed);
|
||||
_vmTemplateStoreDao.update(tmpltStore.getId(), tmpltStore);
|
||||
try {
|
||||
stateMachine.transitTo(tmplt, VirtualMachineTemplate.Event.OperationFailed, null, _templateDao);
|
||||
} catch (NoTransitionException e) {
|
||||
s_logger.error("Unexpected state transition exception for template " + tmplt.getName() + ". Details: " + e.getMessage());
|
||||
}
|
||||
} else {
|
||||
s_logger.info("Template Sync did not find " + uniqueName + " on image store " + storeId +
|
||||
", may request download based on available hypervisor types");
|
||||
s_logger.info("Template Sync did not find " + uniqueName + " on image store " + storeId + ", may request download based on available hypervisor types");
|
||||
if (tmpltStore != null) {
|
||||
if (_storeMgr.isRegionStore(store) && tmpltStore.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOADED
|
||||
&& tmpltStore.getState() == State.Ready
|
||||
&& tmpltStore.getInstallPath() == null) {
|
||||
s_logger.info("Keep fake entry in template store table for migration of previous NFS to object store");
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
s_logger.info("Removing leftover template " + uniqueName + " entry from template store table");
|
||||
// remove those leftover entries
|
||||
_vmTemplateStoreDao.remove(tmpltStore.getId());
|
||||
|
|
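Editor's note: the sync logic above funnels SSVM-uploaded templates through the template state machine. Below is a minimal illustrative sketch of that pattern, not part of the commit; it assumes the same getStateMachine()/transitTo(...) signatures used elsewhere in this patch, and VMTemplateVO/VMTemplateDao stand in for the service's injected fields.

import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.template.VirtualMachineTemplate;
import com.cloud.utils.fsm.NoTransitionException;
import com.cloud.utils.fsm.StateMachine2;

class TemplateUploadTransitionSketch {
    // Marks the outcome of an SSVM upload for a template that is still in
    // NotUploaded or UploadInProgress; invalid transitions surface as NoTransitionException.
    void markOutcome(VMTemplateVO tmplt, boolean succeeded, VMTemplateDao templateDao) {
        StateMachine2<VirtualMachineTemplate.State, VirtualMachineTemplate.Event, VirtualMachineTemplate> fsm =
                VirtualMachineTemplate.State.getStateMachine();
        VirtualMachineTemplate.Event event = succeeded
                ? VirtualMachineTemplate.Event.OperationSucceeded
                : VirtualMachineTemplate.Event.OperationFailed;
        try {
            fsm.transitTo(tmplt, event, null, templateDao);
        } catch (NoTransitionException e) {
            // the FSM rejects events that are not legal for the template's current state
        }
    }
}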
@@ -422,7 +446,7 @@ public class TemplateServiceImpl implements TemplateService {
availHypers.add(HypervisorType.None); // bug 9809: resume ISO
// download.
for (VMTemplateVO tmplt : toBeDownloaded) {
if (tmplt.getUrl() == null) { // If url is null we can't
if (tmplt.getUrl() == null) { // If url is null, skip downloading
s_logger.info("Skip downloading template " + tmplt.getUniqueName() + " since no url is specified.");
continue;
}
@ -458,9 +482,7 @@ public class TemplateServiceImpl implements TemplateService {
|
|||
for (String uniqueName : templateInfos.keySet()) {
|
||||
TemplateProp tInfo = templateInfos.get(uniqueName);
|
||||
if (_tmpltMgr.templateIsDeleteable(tInfo.getId())) {
|
||||
// we cannot directly call deleteTemplateSync here to
|
||||
// reuse delete logic since in this case, our db does not have
|
||||
// this template at all.
|
||||
// we cannot directly call deleteTemplateSync here to reuse delete logic since in this case db does not have this template at all.
|
||||
TemplateObjectTO tmplTO = new TemplateObjectTO();
|
||||
tmplTO.setDataStore(store.getTO());
|
||||
tmplTO.setPath(tInfo.getInstallPath());
@ -28,7 +28,6 @@ import org.apache.log4j.Logger;
|
|||
import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
|
||||
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
|
||||
|
|
@ -166,7 +165,7 @@ public class TemplateObject implements TemplateInfo {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void processEvent(Event event) {
|
||||
public void processEvent(ObjectInDataStoreStateMachine.Event event) {
|
||||
try {
|
||||
objectInStoreMgr.update(this, event);
|
||||
} catch (NoTransitionException e) {
|
||||
|
|
@ -462,4 +461,21 @@ public class TemplateObject implements TemplateInfo {
|
|||
public Class<?> getEntityType() {
|
||||
return VirtualMachineTemplate.class;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getUpdatedCount() {
|
||||
// TODO Auto-generated method stub
|
||||
return 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void incrUpdatedCount() {
|
||||
// TODO Auto-generated method stub
|
||||
}
|
||||
|
||||
@Override
|
||||
public Date getUpdated() {
|
||||
// TODO Auto-generated method stub
|
||||
return null;
|
||||
}
|
||||
}
@@ -89,6 +89,7 @@ public class ObjectInDataStoreManagerImpl implements ObjectInDataStoreManager {
stateMachines = new StateMachine2<State, Event, DataObjectInStore>();
stateMachines.addTransition(State.Allocated, Event.CreateOnlyRequested, State.Creating);
stateMachines.addTransition(State.Allocated, Event.DestroyRequested, State.Destroying);
stateMachines.addTransition(State.Allocated, Event.OperationFailed, State.Failed);
stateMachines.addTransition(State.Creating, Event.OperationFailed, State.Allocated);
stateMachines.addTransition(State.Creating, Event.OperationSuccessed, State.Ready);
stateMachines.addTransition(State.Ready, Event.CopyingRequested, State.Copying);

@@ -98,6 +99,7 @@ public class ObjectInDataStoreManagerImpl implements ObjectInDataStoreManager {
stateMachines.addTransition(State.Destroying, Event.DestroyRequested, State.Destroying);
stateMachines.addTransition(State.Destroying, Event.OperationSuccessed, State.Destroyed);
stateMachines.addTransition(State.Destroying, Event.OperationFailed, State.Destroying);
stateMachines.addTransition(State.Failed, Event.DestroyRequested, State.Destroying);
// TODO: further investigate why an extra event is sent when it is
// already Ready for DownloadListener
stateMachines.addTransition(State.Ready, Event.OperationSuccessed, State.Ready);
|
|
|||
|
|
@ -290,4 +290,21 @@ public class TemplateEntityImpl implements TemplateEntity {
|
|||
public Class<?> getEntityType() {
|
||||
return VirtualMachineTemplate.class;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getUpdatedCount() {
|
||||
// TODO Auto-generated method stub
|
||||
return 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void incrUpdatedCount() {
|
||||
// TODO Auto-generated method stub
|
||||
}
|
||||
|
||||
@Override
|
||||
public Date getUpdated() {
|
||||
// TODO Auto-generated method stub
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -28,7 +28,6 @@ import javax.naming.ConfigurationException;
|
|||
|
||||
import org.apache.log4j.Logger;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
|
||||
|
|
@ -45,7 +44,9 @@ import com.cloud.storage.Storage.TemplateType;
|
|||
import com.cloud.storage.VMTemplateStorageResourceAssoc.Status;
|
||||
import com.cloud.storage.VMTemplateVO;
|
||||
import com.cloud.storage.dao.VMTemplateDao;
|
||||
import com.cloud.template.VirtualMachineTemplate;
|
||||
import com.cloud.utils.db.GenericDaoBase;
|
||||
import com.cloud.utils.db.JoinBuilder.JoinType;
|
||||
import com.cloud.utils.db.SearchBuilder;
|
||||
import com.cloud.utils.db.SearchCriteria;
|
||||
import com.cloud.utils.db.SearchCriteria.Op;
|
||||
|
|
@ -65,7 +66,9 @@ public class TemplateDataStoreDaoImpl extends GenericDaoBase<TemplateDataStoreVO
|
|||
private SearchBuilder<TemplateDataStoreVO> storeTemplateStateSearch;
|
||||
private SearchBuilder<TemplateDataStoreVO> storeTemplateDownloadStatusSearch;
|
||||
private SearchBuilder<TemplateDataStoreVO> downloadTemplateSearch;
|
||||
private static final String EXPIRE_DOWNLOAD_URLS_FOR_ZONE = "update template_store_ref set download_url_created=? where store_id in (select id from image_store where data_center_id=?)";
|
||||
private SearchBuilder<TemplateDataStoreVO> uploadTemplateStateSearch;
|
||||
private SearchBuilder<VMTemplateVO> templateOnlySearch;
|
||||
private static final String EXPIRE_DOWNLOAD_URLS_FOR_ZONE = "update template_store_ref set download_url_created=? where download_url_created is not null and store_id in (select id from image_store where data_center_id=?)";
|
||||
|
||||
@Inject
|
||||
private DataStoreManager _storeMgr;
|
||||
|
|
@ -136,9 +139,17 @@ public class TemplateDataStoreDaoImpl extends GenericDaoBase<TemplateDataStoreVO
|
|||
|
||||
downloadTemplateSearch = createSearchBuilder();
|
||||
downloadTemplateSearch.and("download_url", downloadTemplateSearch.entity().getExtractUrl(), Op.NNULL);
|
||||
downloadTemplateSearch.and("download_url_created", downloadTemplateSearch.entity().getExtractUrlCreated(), Op.NNULL);
|
||||
downloadTemplateSearch.and("destroyed", downloadTemplateSearch.entity().getDestroyed(), SearchCriteria.Op.EQ);
|
||||
downloadTemplateSearch.done();
|
||||
|
||||
templateOnlySearch = _tmpltDao.createSearchBuilder();
|
||||
templateOnlySearch.and("states", templateOnlySearch.entity().getState(), SearchCriteria.Op.IN);
|
||||
uploadTemplateStateSearch = createSearchBuilder();
|
||||
uploadTemplateStateSearch.join("templateOnlySearch", templateOnlySearch, templateOnlySearch.entity().getId(), uploadTemplateStateSearch.entity().getTemplateId(), JoinType.LEFT);
|
||||
uploadTemplateStateSearch.and("destroyed", uploadTemplateStateSearch.entity().getDestroyed(), SearchCriteria.Op.EQ);
|
||||
uploadTemplateStateSearch.done();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
|
@@ -531,4 +542,11 @@ public class TemplateDataStoreDaoImpl extends GenericDaoBase<TemplateDataStoreVO

}

@Override
public List<TemplateDataStoreVO> listByTemplateState(VirtualMachineTemplate.State... states) {
SearchCriteria<TemplateDataStoreVO> sc = uploadTemplateStateSearch.create();
sc.setJoinParameters("templateOnlySearch", "states", (Object[])states);
sc.setParameters("destroyed", false);
return listIncludingRemovedBy(sc);
}
}
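Editor's note: a usage sketch for the new finder, not part of the commit. It assumes listByTemplateState is also declared on the TemplateDataStoreDao interface; the upload monitor added later in this patch calls it exactly this way to pick up template_store_ref rows whose template is still in an upload state.

import java.util.List;
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
import com.cloud.template.VirtualMachineTemplate;

class UploadPollSketch {
    private TemplateDataStoreDao templateDataStoreDao; // injected in the real monitor

    List<TemplateDataStoreVO> pendingUploads() {
        // rows joined to templates still in NotUploaded or UploadInProgress, destroyed = false
        return templateDataStoreDao.listByTemplateState(VirtualMachineTemplate.State.NotUploaded,
                VirtualMachineTemplate.State.UploadInProgress);
    }
}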
||||
|
|
|
|||
|
|
@ -27,7 +27,6 @@ import javax.naming.ConfigurationException;
|
|||
|
||||
import org.apache.log4j.Logger;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
|
||||
|
|
@ -36,7 +35,11 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreState
|
|||
import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO;
|
||||
|
||||
import com.cloud.storage.Volume;
|
||||
import com.cloud.storage.VolumeVO;
|
||||
import com.cloud.storage.dao.VolumeDao;
|
||||
import com.cloud.utils.db.GenericDaoBase;
|
||||
import com.cloud.utils.db.JoinBuilder.JoinType;
|
||||
import com.cloud.utils.db.SearchBuilder;
|
||||
import com.cloud.utils.db.SearchCriteria;
|
||||
import com.cloud.utils.db.SearchCriteria.Op;
|
||||
|
|
@ -53,11 +56,15 @@ public class VolumeDataStoreDaoImpl extends GenericDaoBase<VolumeDataStoreVO, Lo
|
|||
private SearchBuilder<VolumeDataStoreVO> storeVolumeSearch;
|
||||
private SearchBuilder<VolumeDataStoreVO> downloadVolumeSearch;
|
||||
private SearchBuilder<VolumeDataStoreVO> uploadVolumeSearch;
|
||||
private static final String EXPIRE_DOWNLOAD_URLS_FOR_ZONE = "update volume_store_ref set download_url_created=? where store_id in (select id from image_store where data_center_id=?)";
|
||||
private SearchBuilder<VolumeVO> volumeOnlySearch;
|
||||
private SearchBuilder<VolumeDataStoreVO> uploadVolumeStateSearch;
|
||||
private static final String EXPIRE_DOWNLOAD_URLS_FOR_ZONE = "update volume_store_ref set download_url_created=? where download_url_created is not null and store_id in (select id from image_store where data_center_id=?)";
|
||||
|
||||
|
||||
@Inject
|
||||
DataStoreManager storeMgr;
|
||||
@Inject
|
||||
VolumeDao volumeDao;
|
||||
|
||||
@Override
|
||||
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
|
||||
|
|
@ -93,6 +100,7 @@ public class VolumeDataStoreDaoImpl extends GenericDaoBase<VolumeDataStoreVO, Lo
|
|||
|
||||
downloadVolumeSearch = createSearchBuilder();
|
||||
downloadVolumeSearch.and("download_url", downloadVolumeSearch.entity().getExtractUrl(), Op.NNULL);
|
||||
downloadVolumeSearch.and("download_url_created", downloadVolumeSearch.entity().getExtractUrlCreated(), Op.NNULL);
|
||||
downloadVolumeSearch.and("destroyed", downloadVolumeSearch.entity().getDestroyed(), SearchCriteria.Op.EQ);
|
||||
downloadVolumeSearch.done();
|
||||
|
||||
|
|
@ -102,6 +110,13 @@ public class VolumeDataStoreDaoImpl extends GenericDaoBase<VolumeDataStoreVO, Lo
|
|||
uploadVolumeSearch.and("destroyed", uploadVolumeSearch.entity().getDestroyed(), SearchCriteria.Op.EQ);
|
||||
uploadVolumeSearch.done();
|
||||
|
||||
volumeOnlySearch = volumeDao.createSearchBuilder();
|
||||
volumeOnlySearch.and("states", volumeOnlySearch.entity().getState(), Op.IN);
|
||||
uploadVolumeStateSearch = createSearchBuilder();
|
||||
uploadVolumeStateSearch.join("volumeOnlySearch", volumeOnlySearch, volumeOnlySearch.entity().getId(), uploadVolumeStateSearch.entity().getVolumeId(), JoinType.LEFT);
|
||||
uploadVolumeStateSearch.and("destroyed", uploadVolumeStateSearch.entity().getDestroyed(), SearchCriteria.Op.EQ);
|
||||
uploadVolumeStateSearch.done();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
|
@@ -326,4 +341,13 @@ public class VolumeDataStoreDaoImpl extends GenericDaoBase<VolumeDataStoreVO, Lo
}

}

@Override
public List<VolumeDataStoreVO> listByVolumeState(Volume.State... states) {
SearchCriteria<VolumeDataStoreVO> sc = uploadVolumeStateSearch.create();
sc.setJoinParameters("volumeOnlySearch", "states", (Object[])states);
sc.setParameters("destroyed", false);
return listIncludingRemovedBy(sc);
}

}
|
|
|
|||
|
|
@ -28,6 +28,7 @@ import javax.inject.Inject;
|
|||
|
||||
import com.cloud.offering.DiskOffering;
|
||||
import com.cloud.storage.RegisterVolumePayload;
|
||||
import com.cloud.utils.Pair;
|
||||
import org.apache.cloudstack.engine.cloud.entity.api.VolumeEntity;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
|
||||
|
|
@@ -1226,6 +1227,19 @@ public class VolumeServiceImpl implements VolumeService {
return future;
}

@Override
public Pair<EndPoint,DataObject> registerVolumeForPostUpload(VolumeInfo volume, DataStore store) {

EndPoint ep = _epSelector.select(store);
if (ep == null) {
String errorMessage = "There is no secondary storage VM for image store " + store.getName();
s_logger.warn(errorMessage);
throw new CloudRuntimeException(errorMessage);
}
DataObject volumeOnStore = store.create(volume);
return new Pair<>(ep,volumeOnStore);
}
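Editor's note: an illustrative caller sketch, not part of the commit. It assumes registerVolumeForPostUpload is also exposed on the VolumeService interface; Pair.first()/second() split the result into the SSVM endpoint that will receive the HTTP POST upload and the placeholder entry created on the image store.

import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
import com.cloud.utils.Pair;

class PostUploadRegistrationSketch {
    EndPoint registerAndGetEndpoint(VolumeService volService, VolumeInfo volume, DataStore store) {
        // The pair carries the SSVM endpoint for the post upload and the
        // volume entry created on the image store in an upload state.
        Pair<EndPoint, DataObject> result = volService.registerVolumeForPostUpload(volume, store);
        DataObject volumeOnStore = result.second();
        return result.first();
    }
}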
|
||||
protected Void registerVolumeCallback(AsyncCallbackDispatcher<VolumeServiceImpl, CreateCmdResult> callback, CreateVolumeContext<VolumeApiResult> context) {
|
||||
CreateCmdResult result = callback.getResult();
|
||||
VolumeObject vo = (VolumeObject)context.volume;
|
||||
|
|
@ -1391,7 +1405,7 @@ public class VolumeServiceImpl implements VolumeService {
|
|||
for (VolumeDataStoreVO volumeStore : dbVolumes) {
|
||||
VolumeVO volume = volDao.findById(volumeStore.getVolumeId());
|
||||
if (volume == null) {
|
||||
s_logger.warn("Volume_store_ref shows that volume " + volumeStore.getVolumeId() + " is on image store " + storeId +
|
||||
s_logger.warn("Volume_store_ref table shows that volume " + volumeStore.getVolumeId() + " is on image store " + storeId +
|
||||
", but the volume is not found in volumes table, potentially some bugs in deleteVolume, so we just treat this volume to be deleted and mark it as destroyed");
|
||||
volumeStore.setDestroyed(true);
|
||||
_volumeStoreDao.update(volumeStore.getId(), volumeStore);
|
||||
|
|
@ -1407,20 +1421,23 @@ public class VolumeServiceImpl implements VolumeService {
|
|||
}
|
||||
if (volInfo.isCorrupted()) {
|
||||
volumeStore.setDownloadState(Status.DOWNLOAD_ERROR);
|
||||
String msg = "Volume " + volume.getUuid() + " is corrupted on image store ";
|
||||
String msg = "Volume " + volume.getUuid() + " is corrupted on image store";
|
||||
volumeStore.setErrorString(msg);
|
||||
s_logger.info(msg);
|
||||
if (volumeStore.getDownloadUrl() == null) {
|
||||
msg =
|
||||
"Volume (" + volume.getUuid() + ") with install path " + volInfo.getInstallPath() +
|
||||
"is corrupted, please check in image store: " + volumeStore.getDataStoreId();
|
||||
if (volume.getState() == State.NotUploaded || volume.getState() == State.UploadInProgress) {
|
||||
s_logger.info("Volume Sync found " + volume.getUuid() + " uploaded using SSVM on image store " + storeId + " as corrupted, marking it as failed");
|
||||
_volumeStoreDao.update(volumeStore.getId(), volumeStore);
|
||||
// mark volume as failed, so that storage GC will clean it up
|
||||
VolumeObject volObj = (VolumeObject)volFactory.getVolume(volume.getId());
|
||||
volObj.processEvent(Event.OperationFailed);
|
||||
} else if (volumeStore.getDownloadUrl() == null) {
|
||||
msg = "Volume (" + volume.getUuid() + ") with install path " + volInfo.getInstallPath() + " is corrupted, please check in image store: " + volumeStore.getDataStoreId();
|
||||
s_logger.warn(msg);
|
||||
} else {
|
||||
s_logger.info("Removing volume_store_ref entry for corrupted volume " + volume.getName());
|
||||
_volumeStoreDao.remove(volumeStore.getId());
|
||||
toBeDownloaded.add(volumeStore);
|
||||
}
|
||||
|
||||
} else { // Put them in right status
|
||||
volumeStore.setDownloadPercent(100);
|
||||
volumeStore.setDownloadState(Status.DOWNLOADED);
|
||||
|
|
@ -1437,15 +1454,18 @@ public class VolumeServiceImpl implements VolumeService {
|
|||
volDao.update(volumeStore.getVolumeId(), volume);
|
||||
}
|
||||
|
||||
if (volume.getState() == State.NotUploaded || volume.getState() == State.UploadInProgress) {
|
||||
VolumeObject volObj = (VolumeObject)volFactory.getVolume(volume.getId());
|
||||
volObj.processEvent(Event.OperationSuccessed);
|
||||
}
|
||||
|
||||
if (volInfo.getSize() > 0) {
|
||||
try {
|
||||
_resourceLimitMgr.checkResourceLimit(_accountMgr.getAccount(volume.getAccountId()),
|
||||
com.cloud.configuration.Resource.ResourceType.secondary_storage, volInfo.getSize() - volInfo.getPhysicalSize());
|
||||
} catch (ResourceAllocationException e) {
|
||||
s_logger.warn(e.getMessage());
|
||||
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_RESOURCE_LIMIT_EXCEEDED, volume.getDataCenterId(), volume.getPodId(),
|
||||
e.getMessage(),
|
||||
e.getMessage());
|
||||
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_RESOURCE_LIMIT_EXCEEDED, volume.getDataCenterId(), volume.getPodId(), e.getMessage(), e.getMessage());
|
||||
} finally {
|
||||
_resourceLimitMgr.recalculateResourceCount(volume.getAccountId(), volume.getDomainId(),
|
||||
com.cloud.configuration.Resource.ResourceType.secondary_storage.getOrdinal());
|
||||
|
|
@ -1453,18 +1473,28 @@ public class VolumeServiceImpl implements VolumeService {
|
|||
}
|
||||
}
|
||||
continue;
|
||||
} else if (volume.getState() == State.NotUploaded || volume.getState() == State.UploadInProgress) { // failed uploads through SSVM
|
||||
s_logger.info("Volume Sync did not find " + volume.getUuid() + " uploaded using SSVM on image store " + storeId + ", marking it as failed");
|
||||
toBeDownloaded.remove(volumeStore);
|
||||
volumeStore.setDownloadState(Status.DOWNLOAD_ERROR);
|
||||
String msg = "Volume " + volume.getUuid() + " is corrupted on image store";
|
||||
volumeStore.setErrorString(msg);
|
||||
_volumeStoreDao.update(volumeStore.getId(), volumeStore);
|
||||
// mark volume as failed, so that storage GC will clean it up
|
||||
VolumeObject volObj = (VolumeObject)volFactory.getVolume(volume.getId());
|
||||
volObj.processEvent(Event.OperationFailed);
|
||||
continue;
|
||||
}
|
||||
// Volume is not on secondary but we should download.
|
||||
if (volumeStore.getDownloadState() != Status.DOWNLOADED) {
|
||||
s_logger.info("Volume Sync did not find " + volume.getName() + " ready on image store " + storeId +
|
||||
", will request download to start/resume shortly");
|
||||
s_logger.info("Volume Sync did not find " + volume.getName() + " ready on image store " + storeId + ", will request download to start/resume shortly");
|
||||
}
|
||||
}
|
||||
|
||||
// Download volumes which haven't been downloaded yet.
|
||||
if (toBeDownloaded.size() > 0) {
|
||||
for (VolumeDataStoreVO volumeHost : toBeDownloaded) {
|
||||
if (volumeHost.getDownloadUrl() == null) { // If url is null we
|
||||
if (volumeHost.getDownloadUrl() == null) { // If url is null, skip downloading
|
||||
s_logger.info("Skip downloading volume " + volumeHost.getVolumeId() + " since no download url is specified.");
|
||||
continue;
|
||||
}
|
||||
|
|
@ -1472,8 +1502,7 @@ public class VolumeServiceImpl implements VolumeService {
|
|||
// if this is a region store, and there is already an DOWNLOADED entry there without install_path information, which
|
||||
// means that this is a duplicate entry from migration of previous NFS to staging.
|
||||
if (store.getScope().getScopeType() == ScopeType.REGION) {
|
||||
if (volumeHost.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOADED
|
||||
&& volumeHost.getInstallPath() == null) {
|
||||
if (volumeHost.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOADED && volumeHost.getInstallPath() == null) {
|
||||
s_logger.info("Skip sync volume for migration of previous NFS to object store");
|
||||
continue;
|
||||
}
|
||||
|
|
@ -1498,9 +1527,7 @@ public class VolumeServiceImpl implements VolumeService {
|
|||
Long uniqueName = entry.getKey();
|
||||
TemplateProp tInfo = entry.getValue();
|
||||
|
||||
//we cannot directly call expungeVolumeAsync here to
|
||||
// reuse delete logic since in this case, our db does not have
|
||||
// this template at all.
|
||||
// we cannot directly call expungeVolumeAsync here to reuse delete logic since in this case db does not have this volume at all.
|
||||
VolumeObjectTO tmplTO = new VolumeObjectTO();
|
||||
tmplTO.setDataStore(store.getTO());
|
||||
tmplTO.setPath(tInfo.getInstallPath());
|
||||
|
|
|
|||
|
|
@ -38,6 +38,7 @@
|
|||
<dependency>
|
||||
<groupId>commons-io</groupId>
|
||||
<artifactId>commons-io</artifactId>
|
||||
<version>${cs.commons-io.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>commons-pool</groupId>
|
||||
|
|
|
|||
|
|
@ -70,6 +70,7 @@
|
|||
<dependency>
|
||||
<groupId>commons-io</groupId>
|
||||
<artifactId>commons-io</artifactId>
|
||||
<version>${cs.commons-io.version}</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
|
|
|
|||
|
|
@ -36,6 +36,7 @@
|
|||
<dependency>
|
||||
<groupId>commons-io</groupId>
|
||||
<artifactId>commons-io</artifactId>
|
||||
<version>${cs.commons-io.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework</groupId>
|
||||
|
|
|
|||
|
|
@ -35,12 +35,14 @@ import com.cloud.storage.VMTemplateVO;
|
|||
import com.cloud.storage.VMTemplateZoneVO;
|
||||
import com.cloud.template.TemplateAdapter;
|
||||
import com.cloud.template.TemplateAdapterBase;
|
||||
import com.cloud.template.VirtualMachineTemplate.State;
|
||||
import com.cloud.user.Account;
|
||||
import com.cloud.utils.db.DB;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd;
|
||||
import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd;
|
||||
import org.apache.cloudstack.api.command.user.template.RegisterTemplateCmd;
|
||||
import org.apache.cloudstack.storage.command.TemplateOrVolumePostUploadCommand;
|
||||
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
|
|
@ -83,7 +85,7 @@ public class BareMetalTemplateAdapter extends TemplateAdapterBase implements Tem
|
|||
|
||||
@Override
|
||||
public VMTemplateVO create(TemplateProfile profile) {
|
||||
VMTemplateVO template = persistTemplate(profile);
|
||||
VMTemplateVO template = persistTemplate(profile, State.Active);
|
||||
Long zoneId = profile.getZoneId();
|
||||
|
||||
// create an entry at template_store_ref with store_id = null to represent that this template is ready for use.
|
||||
|
|
@ -104,6 +106,12 @@ public class BareMetalTemplateAdapter extends TemplateAdapterBase implements Tem
|
|||
return template;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<TemplateOrVolumePostUploadCommand> createTemplateForPostUpload(TemplateProfile profile) {
|
||||
// TODO: support baremetal for postupload
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public TemplateProfile prepareDelete(DeleteIsoCmd cmd) {
|
||||
throw new CloudRuntimeException("Baremetal doesn't support ISO, how the delete get here???");
|
||||
|
|
|
|||
|
|
@ -31,6 +31,7 @@
|
|||
<dependency>
|
||||
<groupId>commons-io</groupId>
|
||||
<artifactId>commons-io</artifactId>
|
||||
<version>${cs.commons-io.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.cloudstack</groupId>
|
||||
|
|
|
|||
|
|
@ -19,6 +19,8 @@ package com.cloud.agent.manager;
|
|||
import org.apache.cloudstack.storage.command.DeleteCommand;
|
||||
import org.apache.cloudstack.storage.command.DownloadCommand;
|
||||
import org.apache.cloudstack.storage.command.DownloadProgressCommand;
|
||||
import org.apache.cloudstack.storage.command.UploadStatusAnswer;
|
||||
import org.apache.cloudstack.storage.command.UploadStatusCommand;
|
||||
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.AttachIsoCommand;
|
||||
|
|
@ -106,4 +108,7 @@ public interface MockStorageManager extends Manager {
|
|||
StoragePoolInfo getLocalStorage(String hostGuid, Long storageSize);
|
||||
|
||||
CopyVolumeAnswer CopyVolume(CopyVolumeCommand cmd);
|
||||
|
||||
public UploadStatusAnswer getUploadStatus(UploadStatusCommand cmd);
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -33,10 +33,12 @@ import javax.naming.ConfigurationException;
|
|||
|
||||
import org.apache.log4j.Logger;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import org.apache.cloudstack.storage.command.DeleteCommand;
|
||||
import org.apache.cloudstack.storage.command.DownloadCommand;
|
||||
import org.apache.cloudstack.storage.command.DownloadProgressCommand;
|
||||
import org.apache.cloudstack.storage.command.UploadStatusAnswer;
|
||||
import org.apache.cloudstack.storage.command.UploadStatusAnswer.UploadStatus;
|
||||
import org.apache.cloudstack.storage.command.UploadStatusCommand;
|
||||
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.AttachIsoCommand;
|
||||
|
|
@ -1255,4 +1257,9 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa
|
|||
return new CopyVolumeAnswer(cmd, true, null, primaryStorage.getMountPoint(), vol.getPath());
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public UploadStatusAnswer getUploadStatus(UploadStatusCommand cmd) {
|
||||
return new UploadStatusAnswer(cmd, UploadStatus.COMPLETED);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -31,6 +31,7 @@ import org.apache.cloudstack.storage.command.DeleteCommand;
|
|||
import org.apache.cloudstack.storage.command.DownloadCommand;
|
||||
import org.apache.cloudstack.storage.command.DownloadProgressCommand;
|
||||
import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
|
||||
import org.apache.cloudstack.storage.command.UploadStatusCommand;
|
||||
import org.apache.log4j.Logger;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
|
|
@ -370,6 +371,8 @@ public class SimulatorManagerImpl extends ManagerBase implements SimulatorManage
|
|||
answer = _mockStorageMgr.ComputeChecksum((ComputeChecksumCommand)cmd);
|
||||
} else if (cmd instanceof CreatePrivateTemplateFromVolumeCommand) {
|
||||
answer = _mockStorageMgr.CreatePrivateTemplateFromVolume((CreatePrivateTemplateFromVolumeCommand)cmd);
|
||||
} else if (cmd instanceof UploadStatusCommand) {
|
||||
answer = _mockStorageMgr.getUploadStatus((UploadStatusCommand)cmd);
|
||||
} else if (cmd instanceof MaintainCommand) {
|
||||
answer = _mockAgentMgr.maintain((MaintainCommand)cmd);
|
||||
} else if (cmd instanceof GetVmStatsCommand) {
|
||||
|
|
|
|||
pom.xml
@@ -66,7 +66,7 @@
<cs.guava.version>18.0</cs.guava.version>
<cs.xapi.version>6.2.0-3.1</cs.xapi.version>
<cs.httpclient.version>4.3.6</cs.httpclient.version>
<cs.httpcore.version>4.3.3</cs.httpcore.version>
<cs.httpcore.version>4.4</cs.httpcore.version>
<cs.commons-httpclient.version>3.1</cs.commons-httpclient.version>
<cs.mysql.version>5.1.34</cs.mysql.version>
<cs.xstream.version>1.4.7</cs.xstream.version>
|
|
|
|||
|
|
@ -21,6 +21,7 @@
|
|||
<dependency>
|
||||
<groupId>commons-io</groupId>
|
||||
<artifactId>commons-io</artifactId>
|
||||
<version>${cs.commons-io.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework</groupId>
|
||||
|
|
@ -49,6 +50,7 @@
|
|||
<dependency>
|
||||
<groupId>org.apache.httpcomponents</groupId>
|
||||
<artifactId>httpcore</artifactId>
|
||||
<version>${cs.httpcore.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.cloudstack</groupId>
|
||||
|
|
|
|||
|
|
@ -266,4 +266,5 @@
|
|||
<property name="gslbServiceProviders" value="#{gslbServiceProvidersRegistry.registered}" />
|
||||
</bean>
|
||||
<bean id="certServiceImpl" class="org.apache.cloudstack.network.lb.CertServiceImpl" />
|
||||
<bean id="imageStoreUploadMonitorImpl" class="com.cloud.storage.ImageStoreUploadMonitorImpl" />
|
||||
</beans>
|
||||
|
|
|
|||
|
|
@ -3317,7 +3317,7 @@ public class QueryManagerImpl extends ManagerBase implements QueryService {
|
|||
if (showRemovedTmpl) {
|
||||
uniqueTmplPair = _templateJoinDao.searchIncludingRemovedAndCount(sc, searchFilter);
|
||||
} else {
|
||||
sc.addAnd("templateState", SearchCriteria.Op.EQ, State.Active);
|
||||
sc.addAnd("templateState", SearchCriteria.Op.IN, new State[]{State.Active, State.NotUploaded, State.UploadInProgress});
|
||||
uniqueTmplPair = _templateJoinDao.searchAndCount(sc, searchFilter);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -74,7 +74,7 @@ public class TemplateJoinDaoImpl extends GenericDaoBase<TemplateJoinVO, Long> im
|
|||
protected TemplateJoinDaoImpl() {
|
||||
|
||||
tmpltIdPairSearch = createSearchBuilder();
|
||||
tmpltIdPairSearch.and("templateState", tmpltIdPairSearch.entity().getTemplateState(), SearchCriteria.Op.EQ);
|
||||
tmpltIdPairSearch.and("templateState", tmpltIdPairSearch.entity().getTemplateState(), SearchCriteria.Op.IN);
|
||||
tmpltIdPairSearch.and("tempZonePairIN", tmpltIdPairSearch.entity().getTempZonePair(), SearchCriteria.Op.IN);
|
||||
tmpltIdPairSearch.done();
|
||||
|
||||
|
|
@ -412,7 +412,7 @@ public class TemplateJoinDaoImpl extends GenericDaoBase<TemplateJoinVO, Long> im
|
|||
}
|
||||
SearchCriteria<TemplateJoinVO> sc = tmpltIdPairSearch.create();
|
||||
if (!showRemoved) {
|
||||
sc.setParameters("templateState", VirtualMachineTemplate.State.Active);
|
||||
sc.setParameters("templateState", VirtualMachineTemplate.State.Active, VirtualMachineTemplate.State.NotUploaded, VirtualMachineTemplate.State.UploadInProgress);
|
||||
}
|
||||
sc.setParameters("tempZonePairIN", labels);
|
||||
List<TemplateJoinVO> vms = searchIncludingRemoved(sc, searchFilter, null, false);
|
||||
|
|
|
|||
|
|
@@ -2088,7 +2088,9 @@ public enum Config {
PublishAsynJobEvent("Advanced", ManagementServer.class, Boolean.class, "publish.async.job.events", "true", "enable or disable publishing of usage events on the event bus", null),

// StatsCollector
StatsOutPutGraphiteHost("Advanced", ManagementServer.class, String.class, "stats.output.uri", "", "URI to additionally send StatsCollector statistics to", null);
StatsOutPutGraphiteHost("Advanced", ManagementServer.class, String.class, "stats.output.uri", "", "URI to additionally send StatsCollector statistics to", null),

SSVMPSK("Hidden", ManagementServer.class, String.class, "upload.post.secret.key", "", "PSK with SSVM", null);

private final String _category;
private final Class<?> _componentClass;
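Editor's note: an illustrative sketch of how the new hidden key can be read back, not part of the commit. It assumes an injected ConfigurationDao, matching how ConfigurationServerImpl uses one later in this patch.

import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import com.cloud.configuration.Config;

class SsvmPskLookupSketch {
    String lookup(ConfigurationDao configDao) {
        // "upload.post.secret.key" is the PSK handed to the SSVM for post-upload requests
        return configDao.getValue(Config.SSVMPSK.key());
    }
}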
|
|
|||
|
|
@ -46,6 +46,7 @@ import javax.naming.ConfigurationException;
|
|||
import com.cloud.utils.nio.Link;
|
||||
import org.apache.commons.codec.binary.Base64;
|
||||
import org.apache.commons.io.FileUtils;
|
||||
import org.apache.commons.lang.StringUtils;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import org.apache.cloudstack.config.ApiServiceConfiguration;
|
||||
|
|
@ -309,6 +310,9 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio
|
|||
// store the public and private keys in the database
|
||||
updateKeyPairs();
|
||||
|
||||
// generate a PSK to communicate with SSVM
|
||||
updateSecondaryStorageVMSharedKey();
|
||||
|
||||
// generate a random password for system vm
|
||||
updateSystemvmPassword();
|
||||
|
||||
|
|
@@ -967,19 +971,43 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio

private void updateSSOKey() {
try {
String encodedKey = null;

// Algorithm for SSO Keys is SHA1, should this be configurable?
KeyGenerator generator = KeyGenerator.getInstance("HmacSHA1");
SecretKey key = generator.generateKey();
encodedKey = Base64.encodeBase64URLSafeString(key.getEncoded());

_configDao.update(Config.SSOKey.key(), Config.SSOKey.getCategory(), encodedKey);
_configDao.update(Config.SSOKey.key(), Config.SSOKey.getCategory(), getPrivateKey());
} catch (NoSuchAlgorithmException ex) {
s_logger.error("error generating sso key", ex);
}
}

/**
* preshared key to be used by management server to communicate with SSVM during volume/template upload
*/
private void updateSecondaryStorageVMSharedKey() {
try {
ConfigurationVO configInDB = _configDao.findByName(Config.SSVMPSK.key());
if(configInDB == null) {
ConfigurationVO configVO = new ConfigurationVO(Config.SSVMPSK.getCategory(), "DEFAULT", Config.SSVMPSK.getComponent(), Config.SSVMPSK.key(), getPrivateKey(),
Config.SSVMPSK.getDescription());
s_logger.info("generating a new SSVM PSK. This goes to SSVM on Start");
_configDao.persist(configVO);
} else if (StringUtils.isEmpty(configInDB.getValue())) {
s_logger.info("updating the SSVM PSK with new value. This goes to SSVM on Start");
_configDao.update(Config.SSVMPSK.key(), Config.SSVMPSK.getCategory(), getPrivateKey());
}
} catch (NoSuchAlgorithmException ex) {
s_logger.error("error generating ssvm psk", ex);
}
}

private String getPrivateKey() throws NoSuchAlgorithmException {
String encodedKey = null;
// Algorithm for generating Key is SHA1, should this be configurable?
KeyGenerator generator = KeyGenerator.getInstance("HmacSHA1");
SecretKey key = generator.generateKey();
encodedKey = Base64.encodeBase64URLSafeString(key.getEncoded());
return encodedKey;

}


@DB
protected HostPodVO createPod(long userId, String podName, final long zoneId, String gateway, String cidr, final String startIp, String endIp)
throws InternalErrorException {
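Editor's note: a minimal standalone sketch of what getPrivateKey() above produces, not part of the commit: a random HmacSHA1 key, URL-safe Base64 encoded with commons-codec.

import java.security.NoSuchAlgorithmException;
import javax.crypto.KeyGenerator;
import javax.crypto.SecretKey;
import org.apache.commons.codec.binary.Base64;

public class PskSketch {
    public static void main(String[] args) throws NoSuchAlgorithmException {
        // generate a random HMAC-SHA1 key and print its URL-safe Base64 form
        KeyGenerator generator = KeyGenerator.getInstance("HmacSHA1");
        SecretKey key = generator.generateKey();
        System.out.println(Base64.encodeBase64URLSafeString(key.getEncoded()));
    }
}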
|
|||
|
|
@ -41,6 +41,8 @@ import javax.naming.ConfigurationException;
|
|||
|
||||
import org.apache.cloudstack.api.command.admin.usage.RemoveRawUsageRecordsCmd;
|
||||
import org.apache.cloudstack.api.command.user.snapshot.UpdateSnapshotPolicyCmd;
|
||||
import org.apache.cloudstack.api.command.user.template.GetUploadParamsForTemplateCmd;
|
||||
import org.apache.cloudstack.api.command.user.volume.GetUploadParamsForVolumeCmd;
|
||||
import org.apache.commons.codec.binary.Base64;
|
||||
import org.apache.log4j.Logger;
|
||||
import org.apache.cloudstack.acl.ControlledEntity;
|
||||
|
|
@ -3014,7 +3016,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
|||
cmdList.add(UpdateVPCCmdByAdmin.class);
|
||||
cmdList.add(UpdateLBStickinessPolicyCmd.class);
|
||||
cmdList.add(UpdateLBHealthCheckPolicyCmd.class);
|
||||
|
||||
cmdList.add(GetUploadParamsForTemplateCmd.class);
|
||||
cmdList.add(GetUploadParamsForVolumeCmd.class);
|
||||
return cmdList;
|
||||
}
@ -0,0 +1,27 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package com.cloud.storage;
|
||||
|
||||
import com.cloud.utils.component.Manager;
|
||||
|
||||
/**
|
||||
* Monitor upload progress of all entities.
|
||||
*
|
||||
*/
|
||||
public interface ImageStoreUploadMonitor extends Manager {
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,436 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package com.cloud.storage;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.ScheduledExecutorService;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import javax.ejb.Local;
|
||||
import javax.inject.Inject;
|
||||
import javax.naming.ConfigurationException;
|
||||
|
||||
import com.cloud.configuration.Resource;
|
||||
import com.cloud.user.ResourceLimitService;
|
||||
import org.apache.log4j.Logger;
|
||||
import org.springframework.stereotype.Component;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State;
|
||||
import org.apache.cloudstack.framework.config.ConfigKey;
|
||||
import org.apache.cloudstack.framework.config.Configurable;
|
||||
import org.apache.cloudstack.managed.context.ManagedContextRunnable;
|
||||
import org.apache.cloudstack.storage.command.UploadStatusAnswer;
|
||||
import org.apache.cloudstack.storage.command.UploadStatusAnswer.UploadStatus;
|
||||
import org.apache.cloudstack.storage.command.UploadStatusCommand;
|
||||
import org.apache.cloudstack.storage.command.UploadStatusCommand.EntityType;
|
||||
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
|
||||
import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO;
|
||||
import org.apache.cloudstack.utils.identity.ManagementServerNode;
|
||||
|
||||
import com.cloud.agent.Listener;
|
||||
import com.cloud.agent.api.AgentControlAnswer;
|
||||
import com.cloud.agent.api.AgentControlCommand;
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.Command;
|
||||
import com.cloud.agent.api.StartupCommand;
|
||||
import com.cloud.exception.ConnectionException;
|
||||
import com.cloud.host.Host;
|
||||
import com.cloud.host.Status;
|
||||
import com.cloud.host.dao.HostDao;
|
||||
import com.cloud.storage.Volume.Event;
|
||||
import com.cloud.storage.dao.VMTemplateDao;
|
||||
import com.cloud.storage.dao.VolumeDao;
|
||||
import com.cloud.template.VirtualMachineTemplate;
|
||||
import com.cloud.utils.component.ManagerBase;
|
||||
import com.cloud.utils.concurrency.NamedThreadFactory;
|
||||
import com.cloud.utils.db.Transaction;
|
||||
import com.cloud.utils.db.TransactionCallbackNoReturn;
|
||||
import com.cloud.utils.db.TransactionStatus;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.cloud.utils.fsm.NoTransitionException;
|
||||
import com.cloud.utils.fsm.StateMachine2;
|
||||
|
||||
/**
|
||||
* Monitors the progress of upload.
|
||||
*/
|
||||
@Component
|
||||
@Local(value = {ImageStoreUploadMonitor.class})
|
||||
public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageStoreUploadMonitor, Listener, Configurable {
|
||||
|
||||
static final Logger s_logger = Logger.getLogger(ImageStoreUploadMonitorImpl.class);
|
||||
|
||||
@Inject
|
||||
private VolumeDao _volumeDao;
|
||||
@Inject
|
||||
private VolumeDataStoreDao _volumeDataStoreDao;
|
||||
@Inject
|
||||
private VMTemplateDao _templateDao;
|
||||
@Inject
|
||||
private TemplateDataStoreDao _templateDataStoreDao;
|
||||
@Inject
|
||||
private HostDao _hostDao;
|
||||
@Inject
|
||||
private EndPointSelector _epSelector;
|
||||
@Inject
|
||||
private DataStoreManager storeMgr;
|
||||
@Inject
|
||||
ResourceLimitService _resourceLimitMgr;
|
||||
|
||||
private long _nodeId;
private ScheduledExecutorService _executor = null;
private int _monitoringInterval;
private long _uploadOperationTimeout;

static final ConfigKey<Integer> UploadMonitoringInterval = new ConfigKey<Integer>("Advanced", Integer.class, "upload.monitoring.interval", "60",
"Interval (in seconds) to check the status of volumes that are uploaded using HTTP POST request", true);
static final ConfigKey<Integer> UploadOperationTimeout = new ConfigKey<Integer>("Advanced", Integer.class, "upload.operation.timeout", "10",
"Time (in minutes) to wait before abandoning volume upload using HTTP POST request", true);

@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
_executor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("Upload-Monitor"));
_monitoringInterval = UploadMonitoringInterval.value();
_uploadOperationTimeout = UploadOperationTimeout.value() * 60 * 1000;
_nodeId = ManagementServerNode.getManagementServerId();
return true;
}

@Override
public boolean start() {
_executor.scheduleWithFixedDelay(new UploadStatusCheck(), _monitoringInterval, _monitoringInterval, TimeUnit.SECONDS);
return true;
}
|
||||
|
||||
@Override
|
||||
public boolean stop() {
|
||||
_executor.shutdownNow();
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean processAnswers(long agentId, long seq, Answer[] answers) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean processCommands(long agentId, long seq, Command[] commands) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public AgentControlAnswer processControlCommand(long agentId, AgentControlCommand cmd) {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean processDisconnect(long agentId, Status state) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isRecurring() {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getTimeout() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean processTimeout(long agentId, long seq) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) throws ConnectionException {
|
||||
}
|
||||
|
||||
protected class UploadStatusCheck extends ManagedContextRunnable {
|
||||
|
||||
public UploadStatusCheck() {
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void runInContext() {
|
||||
// 1. Select all entries with download_state = Not_Downloaded or Download_In_Progress
|
||||
// 2. Get corresponding volume
|
||||
// 3. Get EP using _epSelector
|
||||
// 4. Check if SSVM is owned by this MS
|
||||
// 5. If owned by MS then send command to appropriate SSVM
|
||||
// 6. In listener check for the answer and update DB accordingly
|
||||
List<VolumeDataStoreVO> volumeDataStores = _volumeDataStoreDao.listByVolumeState(Volume.State.NotUploaded, Volume.State.UploadInProgress);
|
||||
for (VolumeDataStoreVO volumeDataStore : volumeDataStores) {
|
||||
try {
|
||||
DataStore dataStore = storeMgr.getDataStore(volumeDataStore.getDataStoreId(), DataStoreRole.Image);
|
||||
EndPoint ep = _epSelector.select(dataStore, volumeDataStore.getExtractUrl());
|
||||
if (ep == null) {
|
||||
s_logger.warn("There is no secondary storage VM for image store " + dataStore.getName());
|
||||
continue;
|
||||
}
|
||||
VolumeVO volume = _volumeDao.findById(volumeDataStore.getVolumeId());
|
||||
if (volume == null) {
|
||||
s_logger.warn("Volume with id " + volumeDataStore.getVolumeId() + " not found");
|
||||
continue;
|
||||
}
|
||||
Host host = _hostDao.findById(ep.getId());
|
||||
UploadStatusCommand cmd = new UploadStatusCommand(volume.getUuid(), EntityType.Volume);
|
||||
if (host != null && host.getManagementServerId() != null) {
|
||||
if (_nodeId == host.getManagementServerId().longValue()) {
|
||||
Answer answer = null;
|
||||
try {
|
||||
answer = ep.sendMessage(cmd);
|
||||
} catch (CloudRuntimeException e) {
|
||||
s_logger.warn("Unable to get upload status for volume " + volume.getUuid() + ". Error details: " + e.getMessage());
|
||||
answer = new UploadStatusAnswer(cmd, UploadStatus.UNKNOWN, e.getMessage());
|
||||
}
|
||||
if (answer == null || !(answer instanceof UploadStatusAnswer)) {
|
||||
s_logger.warn("No or invalid answer corresponding to UploadStatusCommand for volume " + volumeDataStore.getVolumeId());
|
||||
continue;
|
||||
}
|
||||
handleVolumeStatusResponse((UploadStatusAnswer)answer, volume, volumeDataStore);
|
||||
}
|
||||
} else {
|
||||
String error = "Volume " + volume.getUuid() + " failed to upload as SSVM is either destroyed or SSVM agent not in 'Up' state";
|
||||
handleVolumeStatusResponse(new UploadStatusAnswer(cmd, UploadStatus.ERROR, error), volume, volumeDataStore);
|
||||
}
|
||||
} catch (Throwable th) {
|
||||
s_logger.warn("Exception while checking status for uploaded volume " + volumeDataStore.getExtractUrl() + ". Error details: " + th.getMessage());
|
||||
if (s_logger.isTraceEnabled()) {
|
||||
s_logger.trace("Exception details: ", th);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Handle for template upload as well
|
||||
List<TemplateDataStoreVO> templateDataStores = _templateDataStoreDao.listByTemplateState(VirtualMachineTemplate.State.NotUploaded, VirtualMachineTemplate.State.UploadInProgress);
|
||||
for (TemplateDataStoreVO templateDataStore : templateDataStores) {
|
||||
try {
|
||||
DataStore dataStore = storeMgr.getDataStore(templateDataStore.getDataStoreId(), DataStoreRole.Image);
|
||||
EndPoint ep = _epSelector.select(dataStore, templateDataStore.getExtractUrl());
|
||||
if (ep == null) {
|
||||
s_logger.warn("There is no secondary storage VM for image store " + dataStore.getName());
|
||||
continue;
|
||||
}
|
||||
VMTemplateVO template = _templateDao.findById(templateDataStore.getTemplateId());
|
||||
if (template == null) {
|
||||
s_logger.warn("Template with id " + templateDataStore.getTemplateId() + " not found");
|
||||
continue;
|
||||
}
|
||||
Host host = _hostDao.findById(ep.getId());
|
||||
UploadStatusCommand cmd = new UploadStatusCommand(template.getUuid(), EntityType.Template);
|
||||
if (host != null && host.getManagementServerId() != null) {
|
||||
if (_nodeId == host.getManagementServerId().longValue()) {
|
||||
Answer answer = null;
|
||||
try {
|
||||
answer = ep.sendMessage(cmd);
|
||||
} catch (CloudRuntimeException e) {
|
||||
s_logger.warn("Unable to get upload status for template " + template.getUuid() + ". Error details: " + e.getMessage());
|
||||
answer = new UploadStatusAnswer(cmd, UploadStatus.UNKNOWN, e.getMessage());
|
||||
}
|
||||
if (answer == null || !(answer instanceof UploadStatusAnswer)) {
|
||||
s_logger.warn("No or invalid answer corresponding to UploadStatusCommand for template " + templateDataStore.getTemplateId());
|
||||
continue;
|
||||
}
|
||||
handleTemplateStatusResponse((UploadStatusAnswer)answer, template, templateDataStore);
|
||||
}
|
||||
} else {
|
||||
String error = "Template " + template.getUuid() + " failed to upload as SSVM is either destroyed or SSVM agent not in 'Up' state";
|
||||
handleTemplateStatusResponse(new UploadStatusAnswer(cmd, UploadStatus.ERROR, error), template, templateDataStore);
|
||||
}
|
||||
} catch (Throwable th) {
|
||||
s_logger.warn("Exception while checking status for uploaded template " + templateDataStore.getExtractUrl() + ". Error details: " + th.getMessage());
|
||||
if (s_logger.isTraceEnabled()) {
|
||||
s_logger.trace("Exception details: ", th);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void handleVolumeStatusResponse(final UploadStatusAnswer answer, final VolumeVO volume, final VolumeDataStoreVO volumeDataStore) {
|
||||
final StateMachine2<Volume.State, Event, Volume> stateMachine = Volume.State.getStateMachine();
|
||||
Transaction.execute(new TransactionCallbackNoReturn() {
|
||||
@Override
|
||||
public void doInTransactionWithoutResult(TransactionStatus status) {
|
||||
VolumeVO tmpVolume = _volumeDao.findById(volume.getId());
|
||||
VolumeDataStoreVO tmpVolumeDataStore = _volumeDataStoreDao.findById(volumeDataStore.getId());
|
||||
try {
|
||||
switch (answer.getStatus()) {
|
||||
case COMPLETED:
|
||||
tmpVolumeDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOADED);
|
||||
tmpVolumeDataStore.setState(State.Ready);
|
||||
tmpVolumeDataStore.setInstallPath(answer.getInstallPath());
|
||||
tmpVolumeDataStore.setPhysicalSize(answer.getPhysicalSize());
|
||||
tmpVolumeDataStore.setSize(answer.getVirtualSize());
|
||||
tmpVolumeDataStore.setDownloadPercent(100);
|
||||
|
||||
VolumeVO volumeUpdate = _volumeDao.createForUpdate();
|
||||
volumeUpdate.setSize(answer.getVirtualSize());
|
||||
_volumeDao.update(tmpVolume.getId(), volumeUpdate);
|
||||
stateMachine.transitTo(tmpVolume, Event.OperationSucceeded, null, _volumeDao);
|
||||
_resourceLimitMgr.incrementResourceCount(volume.getAccountId(), Resource.ResourceType.secondary_storage, answer.getVirtualSize());
|
||||
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Volume " + tmpVolume.getUuid() + " uploaded successfully");
|
||||
}
|
||||
break;
|
||||
case IN_PROGRESS:
|
||||
if (tmpVolume.getState() == Volume.State.NotUploaded) {
|
||||
tmpVolumeDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOAD_IN_PROGRESS);
|
||||
tmpVolumeDataStore.setDownloadPercent(answer.getDownloadPercent());
|
||||
stateMachine.transitTo(tmpVolume, Event.UploadRequested, null, _volumeDao);
|
||||
} else if (tmpVolume.getState() == Volume.State.UploadInProgress) { // check for timeout
|
||||
if (System.currentTimeMillis() - tmpVolumeDataStore.getCreated().getTime() > _uploadOperationTimeout) {
|
||||
tmpVolumeDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOAD_ERROR);
|
||||
tmpVolumeDataStore.setState(State.Failed);
|
||||
stateMachine.transitTo(tmpVolume, Event.OperationFailed, null, _volumeDao);
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Volume " + tmpVolume.getUuid() + " failed to upload due to operation timed out");
|
||||
}
|
||||
} else {
|
||||
tmpVolumeDataStore.setDownloadPercent(answer.getDownloadPercent());
|
||||
}
|
||||
}
|
||||
break;
|
||||
case ERROR:
|
||||
tmpVolumeDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOAD_ERROR);
|
||||
tmpVolumeDataStore.setState(State.Failed);
|
||||
stateMachine.transitTo(tmpVolume, Event.OperationFailed, null, _volumeDao);
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Volume " + tmpVolume.getUuid() + " failed to upload. Error details: " + answer.getDetails());
|
||||
}
|
||||
break;
|
||||
case UNKNOWN:
|
||||
if (tmpVolume.getState() == Volume.State.NotUploaded) { // check for timeout
|
||||
if (System.currentTimeMillis() - tmpVolumeDataStore.getCreated().getTime() > _uploadOperationTimeout) {
|
||||
tmpVolumeDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.ABANDONED);
|
||||
tmpVolumeDataStore.setState(State.Failed);
|
||||
stateMachine.transitTo(tmpVolume, Event.OperationTimeout, null, _volumeDao);
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Volume " + tmpVolume.getUuid() + " failed to upload due to operation timed out");
|
||||
}
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
_volumeDataStoreDao.update(tmpVolumeDataStore.getId(), tmpVolumeDataStore);
|
||||
} catch (NoTransitionException e) {
|
||||
s_logger.error("Unexpected error " + e.getMessage());
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
        private void handleTemplateStatusResponse(final UploadStatusAnswer answer, final VMTemplateVO template, final TemplateDataStoreVO templateDataStore) {
            final StateMachine2<VirtualMachineTemplate.State, VirtualMachineTemplate.Event, VirtualMachineTemplate> stateMachine = VirtualMachineTemplate.State.getStateMachine();
            Transaction.execute(new TransactionCallbackNoReturn() {
                @Override
                public void doInTransactionWithoutResult(TransactionStatus status) {
                    VMTemplateVO tmpTemplate = _templateDao.findById(template.getId());
                    TemplateDataStoreVO tmpTemplateDataStore = _templateDataStoreDao.findById(templateDataStore.getId());
                    try {
                        switch (answer.getStatus()) {
                        case COMPLETED:
                            tmpTemplateDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOADED);
                            tmpTemplateDataStore.setState(State.Ready);
                            tmpTemplateDataStore.setInstallPath(answer.getInstallPath());
                            tmpTemplateDataStore.setPhysicalSize(answer.getPhysicalSize());
                            tmpTemplateDataStore.setSize(answer.getVirtualSize());
                            tmpTemplateDataStore.setDownloadPercent(100);
                            tmpTemplateDataStore.setExtractUrl(null);

                            VMTemplateVO templateUpdate = _templateDao.createForUpdate();
                            templateUpdate.setSize(answer.getVirtualSize());
                            _templateDao.update(tmpTemplate.getId(), templateUpdate);
                            stateMachine.transitTo(tmpTemplate, VirtualMachineTemplate.Event.OperationSucceeded, null, _templateDao);
                            _resourceLimitMgr.incrementResourceCount(template.getAccountId(), Resource.ResourceType.secondary_storage, answer.getVirtualSize());

                            if (s_logger.isDebugEnabled()) {
                                s_logger.debug("Template " + tmpTemplate.getUuid() + " uploaded successfully");
                            }
                            break;
                        case IN_PROGRESS:
                            if (tmpTemplate.getState() == VirtualMachineTemplate.State.NotUploaded) {
                                tmpTemplateDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOAD_IN_PROGRESS);
                                stateMachine.transitTo(tmpTemplate, VirtualMachineTemplate.Event.UploadRequested, null, _templateDao);
                                tmpTemplateDataStore.setDownloadPercent(answer.getDownloadPercent());
                            } else if (tmpTemplate.getState() == VirtualMachineTemplate.State.UploadInProgress) { // check for timeout
                                if (System.currentTimeMillis() - tmpTemplateDataStore.getCreated().getTime() > _uploadOperationTimeout) {
                                    tmpTemplateDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOAD_ERROR);
                                    tmpTemplateDataStore.setState(State.Failed);
                                    stateMachine.transitTo(tmpTemplate, VirtualMachineTemplate.Event.OperationFailed, null, _templateDao);
                                    if (s_logger.isDebugEnabled()) {
                                        s_logger.debug("Template " + tmpTemplate.getUuid() + " failed to upload because the operation timed out");
                                    }
                                } else {
                                    tmpTemplateDataStore.setDownloadPercent(answer.getDownloadPercent());
                                }
                            }
                            break;
                        case ERROR:
                            tmpTemplateDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOAD_ERROR);
                            tmpTemplateDataStore.setState(State.Failed);
                            stateMachine.transitTo(tmpTemplate, VirtualMachineTemplate.Event.OperationFailed, null, _templateDao);
                            if (s_logger.isDebugEnabled()) {
                                s_logger.debug("Template " + tmpTemplate.getUuid() + " failed to upload. Error details: " + answer.getDetails());
                            }
                            break;
                        case UNKNOWN:
                            if (tmpTemplate.getState() == VirtualMachineTemplate.State.NotUploaded) { // check for timeout
                                if (System.currentTimeMillis() - tmpTemplateDataStore.getCreated().getTime() > _uploadOperationTimeout) {
                                    tmpTemplateDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.ABANDONED);
                                    tmpTemplateDataStore.setState(State.Failed);
                                    stateMachine.transitTo(tmpTemplate, VirtualMachineTemplate.Event.OperationTimeout, null, _templateDao);
                                    if (s_logger.isDebugEnabled()) {
                                        s_logger.debug("Template " + tmpTemplate.getUuid() + " failed to upload because the operation timed out");
                                    }
                                }
                            }
                            break;
                        }
                        _templateDataStoreDao.update(tmpTemplateDataStore.getId(), tmpTemplateDataStore);
                    } catch (NoTransitionException e) {
                        s_logger.error("Unexpected error " + e.getMessage());
                    }
                }
            });
        }

    }

    @Override
    public String getConfigComponentName() {
        return ImageStoreUploadMonitor.class.getSimpleName();
    }

    @Override
    public ConfigKey<?>[] getConfigKeys() {
        return new ConfigKey<?>[] {UploadMonitoringInterval, UploadOperationTimeout};
    }

    public static int getUploadOperationTimeout() {
        return UploadOperationTimeout.value();
    }

}
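The UNKNOWN/NotUploaded branches above abandon an upload once the store entry's creation time is older than the configured timeout. A minimal standalone sketch of that rule, not part of the commit; the helper name and the minute-based unit are illustrative, not CloudStack APIs:

import java.util.Date;
import java.util.concurrent.TimeUnit;

// Sketch of the abandonment rule applied by the monitor: an upload whose
// template/volume store entry was created longer ago than the configured
// timeout is marked ABANDONED and its state moved to Failed.
public class UploadTimeoutCheck {
    public static boolean timedOut(Date createdInStore, long timeoutMinutes) {
        long elapsedMillis = System.currentTimeMillis() - createdInStore.getTime();
        return elapsedMillis > TimeUnit.MINUTES.toMillis(timeoutMinutes);
    }

    public static void main(String[] args) {
        Date created = new Date(System.currentTimeMillis() - TimeUnit.MINUTES.toMillis(90));
        System.out.println("abandon upload? " + timedOut(created, 60)); // prints true: 90 min > 60 min
    }
}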
@ -44,7 +44,6 @@ import javax.naming.ConfigurationException;
|
|||
import com.cloud.hypervisor.Hypervisor;
|
||||
import org.apache.log4j.Logger;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd;
|
||||
import org.apache.cloudstack.api.command.admin.storage.CreateSecondaryStagingStoreCmd;
|
||||
import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd;
|
||||
|
|
@ -60,6 +59,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
|
|||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
|
||||
|
|
@ -71,7 +71,9 @@ import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory;
|
|||
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService.TemplateApiResult;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService.VolumeApiResult;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
|
||||
|
|
@ -109,6 +111,7 @@ import com.cloud.cluster.ClusterManagerListener;
|
|||
import com.cloud.cluster.ManagementServerHost;
|
||||
import com.cloud.configuration.Config;
|
||||
import com.cloud.configuration.ConfigurationManager;
|
||||
import com.cloud.configuration.Resource.ResourceType;
|
||||
import com.cloud.dc.ClusterVO;
|
||||
import com.cloud.dc.DataCenterVO;
|
||||
import com.cloud.dc.dao.ClusterDao;
|
||||
|
|
@ -150,8 +153,10 @@ import com.cloud.storage.dao.VolumeDao;
|
|||
import com.cloud.storage.listener.StoragePoolMonitor;
|
||||
import com.cloud.storage.listener.VolumeStateListener;
|
||||
import com.cloud.template.TemplateManager;
|
||||
import com.cloud.template.VirtualMachineTemplate;
|
||||
import com.cloud.user.Account;
|
||||
import com.cloud.user.AccountManager;
|
||||
import com.cloud.user.ResourceLimitService;
|
||||
import com.cloud.user.dao.UserDao;
|
||||
import com.cloud.utils.DateUtil;
|
||||
import com.cloud.utils.NumbersUtil;
|
||||
|
|
@ -195,6 +200,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
|||
@Inject
|
||||
protected VolumeDao _volsDao;
|
||||
@Inject
|
||||
private VolumeDataStoreDao _volumeDataStoreDao;
|
||||
@Inject
|
||||
protected HostDao _hostDao;
|
||||
@Inject
|
||||
protected SnapshotDao _snapshotDao;
|
||||
|
|
@ -247,10 +254,6 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
|||
@Inject
|
||||
ManagementServer _msServer;
|
||||
@Inject
|
||||
DataStoreManager dataStoreMgr;
|
||||
@Inject
|
||||
DataStoreProviderManager dataStoreProviderMgr;
|
||||
@Inject
|
||||
VolumeService volService;
|
||||
@Inject
|
||||
VolumeDataFactory volFactory;
|
||||
|
|
@ -270,6 +273,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
|||
EndPointSelector _epSelector;
|
||||
@Inject
|
||||
private DiskOfferingDao _diskOfferingDao;
|
||||
@Inject
|
||||
ResourceLimitService _resourceLimitMgr;
|
||||
|
||||
protected List<StoragePoolDiscoverer> _discoverers;
|
||||
|
||||
|
|
@ -571,7 +576,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
|||
}
|
||||
}
|
||||
|
||||
DataStoreProvider provider = dataStoreProviderMgr.getDefaultPrimaryDataStoreProvider();
|
||||
DataStoreProvider provider = _dataStoreProviderMgr.getDefaultPrimaryDataStoreProvider();
|
||||
DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle();
|
||||
if (pool == null) {
|
||||
Map<String, Object> params = new HashMap<String, Object>();
|
||||
|
|
@ -588,7 +593,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
|||
|
||||
store = lifeCycle.initialize(params);
|
||||
} else {
|
||||
store = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
|
||||
store = _dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
|
||||
}
|
||||
|
||||
pool = _storagePoolDao.findById(store.getId());
|
||||
|
|
@ -602,17 +607,17 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
|||
throw new ConnectionException(true, "Unable to setup the local storage pool for " + host, e);
|
||||
}
|
||||
|
||||
return dataStoreMgr.getDataStore(store.getId(), DataStoreRole.Primary);
|
||||
return _dataStoreMgr.getDataStore(store.getId(), DataStoreRole.Primary);
|
||||
}
|
||||
|
||||
@Override
|
||||
public PrimaryDataStoreInfo createPool(CreateStoragePoolCmd cmd) throws ResourceInUseException, IllegalArgumentException, UnknownHostException,
|
||||
ResourceUnavailableException {
|
||||
String providerName = cmd.getStorageProviderName();
|
||||
DataStoreProvider storeProvider = dataStoreProviderMgr.getDataStoreProvider(providerName);
|
||||
DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(providerName);
|
||||
|
||||
if (storeProvider == null) {
|
||||
storeProvider = dataStoreProviderMgr.getDefaultPrimaryDataStoreProvider();
|
||||
storeProvider = _dataStoreProviderMgr.getDefaultPrimaryDataStoreProvider();
|
||||
if (storeProvider == null) {
|
||||
throw new InvalidParameterValueException("can't find storage provider: " + providerName);
|
||||
}
|
||||
|
|
@ -707,7 +712,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
|||
throw new CloudRuntimeException("Failed to add data store: "+e.getMessage(), e);
|
||||
}
|
||||
|
||||
return (PrimaryDataStoreInfo)dataStoreMgr.getDataStore(store.getId(), DataStoreRole.Primary);
|
||||
return (PrimaryDataStoreInfo)_dataStoreMgr.getDataStore(store.getId(), DataStoreRole.Primary);
|
||||
}
|
||||
|
||||
private Map<String, String> extractApiParamAsMap(Map ds) {
|
||||
|
|
@ -798,7 +803,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
|||
|
||||
if (updatedCapacityBytes != null || updatedCapacityIops != null) {
|
||||
StoragePoolVO storagePool = _storagePoolDao.findById(id);
|
||||
DataStoreProvider dataStoreProvider = dataStoreProviderMgr.getDataStoreProvider(storagePool.getStorageProviderName());
|
||||
DataStoreProvider dataStoreProvider = _dataStoreProviderMgr.getDataStoreProvider(storagePool.getStorageProviderName());
|
||||
DataStoreLifeCycle dataStoreLifeCycle = dataStoreProvider.getDataStoreLifeCycle();
|
||||
|
||||
if (dataStoreLifeCycle instanceof PrimaryDataStoreLifeCycle) {
|
||||
|
|
@ -823,7 +828,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
|||
_storagePoolDao.updateCapacityIops(id, capacityIops);
|
||||
}
|
||||
|
||||
return (PrimaryDataStoreInfo)dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
|
||||
return (PrimaryDataStoreInfo)_dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
@ -887,19 +892,19 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
|||
_storagePoolDao.releaseFromLockTable(lock.getId());
|
||||
s_logger.trace("Released lock for storage pool " + id);
|
||||
|
||||
DataStoreProvider storeProvider = dataStoreProviderMgr.getDataStoreProvider(sPool.getStorageProviderName());
|
||||
DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(sPool.getStorageProviderName());
|
||||
DataStoreLifeCycle lifeCycle = storeProvider.getDataStoreLifeCycle();
|
||||
DataStore store = dataStoreMgr.getDataStore(sPool.getId(), DataStoreRole.Primary);
|
||||
DataStore store = _dataStoreMgr.getDataStore(sPool.getId(), DataStoreRole.Primary);
|
||||
return lifeCycle.deleteDataStore(store);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void connectHostToSharedPool(long hostId, long poolId) throws StorageUnavailableException {
|
||||
StoragePool pool = (StoragePool)dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary);
|
||||
StoragePool pool = (StoragePool)_dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary);
|
||||
assert (pool.isShared()) : "Now, did you actually read the name of this method?";
|
||||
s_logger.debug("Adding pool " + pool.getName() + " to host " + hostId);
|
||||
|
||||
DataStoreProvider provider = dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName());
|
||||
DataStoreProvider provider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName());
|
||||
HypervisorHostListener listener = hostListeners.get(provider.getName());
|
||||
listener.hostConnect(hostId, pool.getId());
|
||||
}
|
||||
|
|
@ -1064,11 +1069,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
|||
List<VolumeVO> vols = _volsDao.listVolumesToBeDestroyed();
|
||||
for (VolumeVO vol : vols) {
|
||||
try {
|
||||
|
||||
volService.expungeVolumeAsync(volFactory.getVolume(vol.getId()));
|
||||
|
||||
} catch (Exception e) {
|
||||
s_logger.warn("Unable to destroy " + vol.getId(), e);
|
||||
s_logger.warn("Unable to destroy volume " + vol.getUuid(), e);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1082,10 +1085,99 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
|||
}
|
||||
_snapshotDao.expunge(snapshotVO.getId());
|
||||
} catch (Exception e) {
|
||||
s_logger.warn("Unable to destroy " + snapshotVO.getId(), e);
|
||||
s_logger.warn("Unable to destroy snapshot " + snapshotVO.getUuid(), e);
|
||||
}
|
||||
}
|
||||
|
||||
// destroy uploaded volumes in abandoned/error state
|
||||
List<VolumeDataStoreVO> volumeDataStores = _volumeDataStoreDao.listByVolumeState(Volume.State.UploadError, Volume.State.UploadAbandoned);
|
||||
for (VolumeDataStoreVO volumeDataStore : volumeDataStores) {
|
||||
VolumeVO volume = _volumeDao.findById(volumeDataStore.getVolumeId());
|
||||
if (volume == null) {
|
||||
s_logger.warn("Uploaded volume with id " + volumeDataStore.getVolumeId() + " not found, so cannot be destroyed");
|
||||
continue;
|
||||
}
|
||||
try {
|
||||
DataStore dataStore = _dataStoreMgr.getDataStore(volumeDataStore.getDataStoreId(), DataStoreRole.Image);
|
||||
EndPoint ep = _epSelector.select(dataStore, volumeDataStore.getExtractUrl());
|
||||
if (ep == null) {
|
||||
s_logger.warn("There is no secondary storage VM for image store " + dataStore.getName() + ", cannot destroy uploaded volume " + volume.getUuid());
|
||||
continue;
|
||||
}
|
||||
Host host = _hostDao.findById(ep.getId());
|
||||
if (host != null && host.getManagementServerId() != null) {
|
||||
if (_serverId == host.getManagementServerId().longValue()) {
|
||||
if (!volService.destroyVolume(volume.getId())) {
|
||||
s_logger.warn("Unable to destroy uploaded volume " + volume.getUuid());
|
||||
continue;
|
||||
}
|
||||
// decrement volume resource count
|
||||
_resourceLimitMgr.decrementResourceCount(volume.getAccountId(), ResourceType.volume, volume.isDisplayVolume());
|
||||
// expunge volume from secondary if volume is on image store
|
||||
VolumeInfo volOnSecondary = volFactory.getVolume(volume.getId(), DataStoreRole.Image);
|
||||
if (volOnSecondary != null) {
|
||||
s_logger.info("Expunging volume " + volume.getUuid() + " uploaded using HTTP POST from secondary data store");
|
||||
AsyncCallFuture<VolumeApiResult> future = volService.expungeVolumeAsync(volOnSecondary);
|
||||
VolumeApiResult result = future.get();
|
||||
if (!result.isSuccess()) {
|
||||
s_logger.warn("Failed to expunge volume " + volume.getUuid() + " from the image store " + dataStore.getName() + " due to: " + result.getResult());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (Throwable th) {
|
||||
s_logger.warn("Unable to destroy uploaded volume " + volume.getUuid() + ". Error details: " + th.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
// destroy uploaded templates in abandoned/error state
|
||||
List<TemplateDataStoreVO> templateDataStores = _templateStoreDao.listByTemplateState(VirtualMachineTemplate.State.UploadError, VirtualMachineTemplate.State.UploadAbandoned);
|
||||
for (TemplateDataStoreVO templateDataStore : templateDataStores) {
|
||||
VMTemplateVO template = _templateDao.findById(templateDataStore.getTemplateId());
|
||||
if (template == null) {
|
||||
s_logger.warn("Uploaded template with id " + templateDataStore.getTemplateId() + " not found, so cannot be destroyed");
|
||||
continue;
|
||||
}
|
||||
try {
|
||||
DataStore dataStore = _dataStoreMgr.getDataStore(templateDataStore.getDataStoreId(), DataStoreRole.Image);
|
||||
EndPoint ep = _epSelector.select(dataStore, templateDataStore.getExtractUrl());
|
||||
if (ep == null) {
|
||||
s_logger.warn("There is no secondary storage VM for image store " + dataStore.getName() + ", cannot destroy uploaded template " + template.getUuid());
|
||||
continue;
|
||||
}
|
||||
Host host = _hostDao.findById(ep.getId());
|
||||
if (host != null && host.getManagementServerId() != null) {
|
||||
if (_serverId == host.getManagementServerId().longValue()) {
|
||||
AsyncCallFuture<TemplateApiResult> future = _imageSrv.deleteTemplateAsync(tmplFactory.getTemplate(template.getId(), dataStore));
|
||||
TemplateApiResult result = future.get();
|
||||
if (!result.isSuccess()) {
|
||||
s_logger.warn("Failed to delete template " + template.getUuid() + " from the image store " + dataStore.getName() + " due to: " + result.getResult());
|
||||
continue;
|
||||
}
|
||||
// remove from template_zone_ref
|
||||
List<VMTemplateZoneVO> templateZones = _vmTemplateZoneDao.listByZoneTemplate(((ImageStoreEntity)dataStore).getDataCenterId(), template.getId());
|
||||
if (templateZones != null) {
|
||||
for (VMTemplateZoneVO templateZone : templateZones) {
|
||||
_vmTemplateZoneDao.remove(templateZone.getId());
|
||||
}
|
||||
}
|
||||
// mark all the occurrences of this template in the given store as destroyed
|
||||
_templateStoreDao.removeByTemplateStore(template.getId(), dataStore.getId());
|
||||
// find all eligible image stores for this template
|
||||
List<DataStore> imageStores = _tmpltMgr.getImageStoreByTemplate(template.getId(), null);
|
||||
if (imageStores == null || imageStores.size() == 0) {
|
||||
template.setState(VirtualMachineTemplate.State.Inactive);
|
||||
_templateDao.update(template.getId(), template);
|
||||
|
||||
// decrement template resource count
|
||||
_resourceLimitMgr.decrementResourceCount(template.getAccountId(), ResourceType.template);
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (Throwable th) {
|
||||
s_logger.warn("Unable to destroy uploaded template " + template.getUuid() + ". Error details: " + th.getMessage());
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
scanLock.unlock();
|
||||
}
|
||||
|
|
@ -1144,7 +1236,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
|||
// so here we don't need to issue DeleteCommand to resource anymore, only need to remove db entry.
|
||||
try {
|
||||
// Cleanup templates in template_store_ref
|
||||
List<DataStore> imageStores = dataStoreMgr.getImageStoresByScope(new ZoneScope(null));
|
||||
List<DataStore> imageStores = _dataStoreMgr.getImageStoresByScope(new ZoneScope(null));
|
||||
for (DataStore store : imageStores) {
|
||||
try {
|
||||
long storeId = store.getId();
|
||||
|
|
@ -1245,12 +1337,12 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
|||
primaryStorage.getStatus().toString());
|
||||
}
|
||||
|
||||
DataStoreProvider provider = dataStoreProviderMgr.getDataStoreProvider(primaryStorage.getStorageProviderName());
|
||||
DataStoreProvider provider = _dataStoreProviderMgr.getDataStoreProvider(primaryStorage.getStorageProviderName());
|
||||
DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle();
|
||||
DataStore store = dataStoreMgr.getDataStore(primaryStorage.getId(), DataStoreRole.Primary);
|
||||
DataStore store = _dataStoreMgr.getDataStore(primaryStorage.getId(), DataStoreRole.Primary);
|
||||
lifeCycle.maintain(store);
|
||||
|
||||
return (PrimaryDataStoreInfo)dataStoreMgr.getDataStore(primaryStorage.getId(), DataStoreRole.Primary);
|
||||
return (PrimaryDataStoreInfo)_dataStoreMgr.getDataStore(primaryStorage.getId(), DataStoreRole.Primary);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
@ -1272,12 +1364,12 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
|||
primaryStorage.getStatus().toString(), primaryStorageId);
|
||||
}
|
||||
|
||||
DataStoreProvider provider = dataStoreProviderMgr.getDataStoreProvider(primaryStorage.getStorageProviderName());
|
||||
DataStoreProvider provider = _dataStoreProviderMgr.getDataStoreProvider(primaryStorage.getStorageProviderName());
|
||||
DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle();
|
||||
DataStore store = dataStoreMgr.getDataStore(primaryStorage.getId(), DataStoreRole.Primary);
|
||||
DataStore store = _dataStoreMgr.getDataStore(primaryStorage.getId(), DataStoreRole.Primary);
|
||||
lifeCycle.cancelMaintain(store);
|
||||
|
||||
return (PrimaryDataStoreInfo)dataStoreMgr.getDataStore(primaryStorage.getId(), DataStoreRole.Primary);
|
||||
return (PrimaryDataStoreInfo)_dataStoreMgr.getDataStore(primaryStorage.getId(), DataStoreRole.Primary);
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -1405,7 +1497,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
|||
|
||||
@Override
|
||||
public PrimaryDataStoreInfo getStoragePool(long id) {
|
||||
return (PrimaryDataStoreInfo)dataStoreMgr.getDataStore(id, DataStoreRole.Primary);
|
||||
return (PrimaryDataStoreInfo)_dataStoreMgr.getDataStore(id, DataStoreRole.Primary);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
@ -1824,9 +1916,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
|||
|
||||
_accountMgr.checkAccessAndSpecifyAuthority(CallContext.current().getCallingAccount(), store.getDataCenterId());
|
||||
|
||||
DataStoreProvider provider = dataStoreProviderMgr.getDataStoreProvider(store.getProviderName());
|
||||
DataStoreProvider provider = _dataStoreProviderMgr.getDataStoreProvider(store.getProviderName());
|
||||
DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle();
|
||||
DataStore secStore = dataStoreMgr.getDataStore(storeId, DataStoreRole.Image);
|
||||
DataStore secStore = _dataStoreMgr.getDataStore(storeId, DataStoreRole.Image);
|
||||
lifeCycle.migrateToObjectStore(secStore);
|
||||
// update store_role in template_store_ref and snapshot_store_ref to ImageCache
|
||||
_templateStoreDao.updateStoreRoleToCachce(storeId);
|
||||
@ -16,6 +16,8 @@
|
|||
// under the License.
|
||||
package com.cloud.storage;
|
||||
|
||||
import java.net.MalformedURLException;
|
||||
import java.net.URL;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Date;
|
||||
import java.util.HashMap;
|
||||
|
|
@ -26,8 +28,18 @@ import java.util.concurrent.ExecutionException;
|
|||
|
||||
import javax.inject.Inject;
|
||||
|
||||
import org.apache.log4j.Logger;
|
||||
import com.cloud.utils.EncryptionUtil;
|
||||
import com.cloud.utils.db.TransactionCallbackWithException;
|
||||
import com.google.gson.Gson;
|
||||
import com.google.gson.GsonBuilder;
|
||||
|
||||
import org.apache.cloudstack.api.command.user.volume.GetUploadParamsForVolumeCmd;
|
||||
import org.apache.cloudstack.api.response.GetUploadParamsResponse;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
|
||||
import org.apache.cloudstack.storage.command.TemplateOrVolumePostUploadCommand;
|
||||
import org.apache.cloudstack.utils.imagestore.ImageStoreUtil;
|
||||
import org.apache.log4j.Logger;
|
||||
import org.apache.cloudstack.api.command.user.volume.AttachVolumeCmd;
|
||||
import org.apache.cloudstack.api.command.user.volume.CreateVolumeCmd;
|
||||
import org.apache.cloudstack.api.command.user.volume.DetachVolumeCmd;
|
||||
|
|
@ -154,6 +166,9 @@ import com.cloud.vm.dao.VMInstanceDao;
|
|||
import com.cloud.vm.snapshot.VMSnapshotVO;
|
||||
import com.cloud.vm.snapshot.dao.VMSnapshotDao;
|
||||
|
||||
import org.joda.time.DateTime;
|
||||
import org.joda.time.DateTimeZone;
|
||||
|
||||
public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiService, VmWorkJobHandler {
|
||||
private final static Logger s_logger = Logger.getLogger(VolumeApiServiceImpl.class);
|
||||
public static final String VM_WORK_JOB_HANDLER = VolumeApiServiceImpl.class.getSimpleName();
|
||||
|
|
@ -259,7 +274,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||
|
||||
validateVolume(caller, ownerId, zoneId, volumeName, url, format, diskOfferingId);
|
||||
|
||||
VolumeVO volume = persistVolume(owner, zoneId, volumeName, url, cmd.getFormat(), diskOfferingId);
|
||||
VolumeVO volume = persistVolume(owner, zoneId, volumeName, url, cmd.getFormat(), diskOfferingId, Volume.State.Allocated);
|
||||
|
||||
VolumeInfo vol = volFactory.getVolume(volume.getId());
|
||||
|
||||
|
|
@ -270,6 +285,84 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||
return volume;
|
||||
}
|
||||
|
||||
@Override
|
||||
@ActionEvent(eventType = EventTypes.EVENT_VOLUME_UPLOAD, eventDescription = "uploading volume for post upload", async = true)
|
||||
public GetUploadParamsResponse uploadVolume(final GetUploadParamsForVolumeCmd cmd) throws ResourceAllocationException, MalformedURLException {
|
||||
Account caller = CallContext.current().getCallingAccount();
|
||||
long ownerId = cmd.getEntityOwnerId();
|
||||
final Account owner = _entityMgr.findById(Account.class, ownerId);
|
||||
final Long zoneId = cmd.getZoneId();
|
||||
final String volumeName = cmd.getName();
|
||||
String format = cmd.getFormat();
|
||||
final Long diskOfferingId = cmd.getDiskOfferingId();
|
||||
String imageStoreUuid = cmd.getImageStoreUuid();
|
||||
final DataStore store = _tmpltMgr.getImageStore(imageStoreUuid, zoneId);
|
||||
|
||||
validateVolume(caller, ownerId, zoneId, volumeName, null, format, diskOfferingId);
|
||||
|
||||
return Transaction.execute(new TransactionCallbackWithException<GetUploadParamsResponse, MalformedURLException>() {
|
||||
@Override
|
||||
public GetUploadParamsResponse doInTransaction(TransactionStatus status) throws MalformedURLException {
|
||||
|
||||
VolumeVO volume = persistVolume(owner, zoneId, volumeName, null, cmd.getFormat(), diskOfferingId, Volume.State.NotUploaded);
|
||||
|
||||
VolumeInfo vol = volFactory.getVolume(volume.getId());
|
||||
|
||||
RegisterVolumePayload payload = new RegisterVolumePayload(null, cmd.getChecksum(), cmd.getFormat());
|
||||
vol.addPayload(payload);
|
||||
|
||||
Pair<EndPoint, DataObject> pair = volService.registerVolumeForPostUpload(vol, store);
|
||||
EndPoint ep = pair.first();
|
||||
DataObject dataObject = pair.second();
|
||||
|
||||
|
||||
GetUploadParamsResponse response = new GetUploadParamsResponse();
|
||||
|
||||
String ssvmUrlDomain = _configDao.getValue(Config.SecStorageSecureCopyCert.key());
|
||||
|
||||
String url = ImageStoreUtil.generatePostUploadUrl(ssvmUrlDomain, ep.getPublicAddr(), vol.getUuid());
|
||||
response.setPostURL(new URL(url));
|
||||
|
||||
// set the post url, this is used in the monitoring thread to determine the SSVM
|
||||
VolumeDataStoreVO volumeStore = _volumeStoreDao.findByVolume(vol.getId());
|
||||
if (volumeStore != null) {
|
||||
volumeStore.setExtractUrl(url);
|
||||
_volumeStoreDao.persist(volumeStore);
|
||||
}
|
||||
|
||||
response.setId(UUID.fromString(vol.getUuid()));
|
||||
|
||||
int timeout = ImageStoreUploadMonitorImpl.getUploadOperationTimeout();
|
||||
DateTime currentDateTime = new DateTime(DateTimeZone.UTC);
|
||||
String expires = currentDateTime.plusMinutes(timeout).toString();
|
||||
response.setTimeout(expires);
|
||||
|
||||
String key = _configDao.getValue(Config.SSVMPSK.key());
|
||||
/*
|
||||
* encoded metadata using the post upload config key
|
||||
*/
|
||||
TemplateOrVolumePostUploadCommand command =
|
||||
new TemplateOrVolumePostUploadCommand(vol.getId(), vol.getUuid(), volumeStore.getInstallPath(), cmd.getChecksum(), vol.getType().toString(),
|
||||
vol.getName(), vol.getFormat().toString(), dataObject.getDataStore().getUri(),
|
||||
dataObject.getDataStore().getRole().toString());
|
||||
command.setLocalPath(volumeStore.getLocalDownloadPath());
|
||||
//using the existing max upload size configuration
|
||||
command.setMaxUploadSize(_configDao.getValue(Config.MaxUploadVolumeSize.key()));
|
||||
command.setDefaultMaxAccountSecondaryStorage(_configDao.getValue(Config.DefaultMaxAccountSecondaryStorage.key()));
|
||||
command.setAccountId(vol.getAccountId());
|
||||
Gson gson = new GsonBuilder().create();
|
||||
String metadata = EncryptionUtil.encodeData(gson.toJson(command), key);
|
||||
response.setMetadata(metadata);
|
||||
|
||||
/*
|
||||
* signature calculated on the url, expiry, metadata.
|
||||
*/
|
||||
response.setSignature(EncryptionUtil.generateSignature(metadata + url + expires, key));
|
||||
return response;
|
||||
}
|
||||
});
|
||||
}
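For reference, the GetUploadParamsResponse assembled above (post URL, metadata, signature and expiry) is meant to be consumed by an HTTP POST against the SSVM; the handler added later in this commit reads the X-signature, X-metadata and X-expires headers. A rough, hypothetical client sketch under those assumptions; the class name, file path, boundary and example URL are made up:

import java.io.InputStream;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

// Illustrative client for the post-upload flow: streams a volume file as a
// multipart POST to the URL returned by the API, passing the metadata,
// signature and expiry back as the headers the SSVM handler expects.
public class PostUploadClient {
    public static void upload(URL postUrl, String metadata, String signature, String expires, Path volumeFile) throws Exception {
        String boundary = "----cloudstack-upload-" + System.nanoTime();
        HttpURLConnection conn = (HttpURLConnection) postUrl.openConnection();
        conn.setRequestMethod("POST");
        conn.setDoOutput(true);
        conn.setChunkedStreamingMode(64 * 1024);            // stream, do not buffer the whole volume
        conn.setRequestProperty("X-metadata", metadata);     // encrypted TemplateOrVolumePostUploadCommand
        conn.setRequestProperty("X-signature", signature);   // signature over metadata + url + expires
        conn.setRequestProperty("X-expires", expires);       // expiry timestamp from the response
        conn.setRequestProperty("Content-Type", "multipart/form-data; boundary=" + boundary);

        try (OutputStream out = conn.getOutputStream(); InputStream in = Files.newInputStream(volumeFile)) {
            out.write(("--" + boundary + "\r\n"
                    + "Content-Disposition: form-data; name=\"file\"; filename=\"" + volumeFile.getFileName() + "\"\r\n"
                    + "Content-Type: application/octet-stream\r\n\r\n").getBytes("UTF-8"));
            byte[] buf = new byte[64 * 1024];
            for (int n; (n = in.read(buf)) > 0; ) {
                out.write(buf, 0, n);
            }
            out.write(("\r\n--" + boundary + "--\r\n").getBytes("UTF-8"));
        }
        System.out.println("SSVM replied with HTTP " + conn.getResponseCode());
    }

    public static void main(String[] args) throws Exception {
        // Values below would come from the GetUploadParamsResponse of the API call.
        upload(new URL("https://1-2-3-4.example.com/upload/uuid"), "<metadata>", "<signature>",
                "2015-01-01T00:00:00.000Z", Paths.get("/tmp/myvolume.qcow2"));
    }
}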
|
||||
|
||||
private boolean validateVolume(Account caller, long ownerId, Long zoneId, String volumeName, String url,
|
||||
String format, Long diskOfferingId) throws ResourceAllocationException {
|
||||
|
||||
|
|
@ -291,22 +384,26 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||
throw new PermissionDeniedException("Cannot perform this operation, Zone is currently disabled: " + zoneId);
|
||||
}
|
||||
|
||||
if (url.toLowerCase().contains("file://")) {
|
||||
throw new InvalidParameterValueException("File:// type urls are currently unsupported");
|
||||
}
|
||||
|
||||
ImageFormat imgfmt = ImageFormat.valueOf(format.toUpperCase());
|
||||
if (imgfmt == null) {
|
||||
throw new IllegalArgumentException("Image format is incorrect " + format + ". Supported formats are " + EnumUtils.listValues(ImageFormat.values()));
|
||||
}
|
||||
|
||||
UriUtils.validateUrl(format, url);
|
||||
|
||||
// validate the url only when it is not null; the url can be null in case of a form-based post upload
|
||||
if (url != null ) {
|
||||
if( url.toLowerCase().contains("file://")) {
|
||||
throw new InvalidParameterValueException("File:// type urls are currently unsupported");
|
||||
}
|
||||
UriUtils.validateUrl(format, url);
|
||||
// check URL existence
|
||||
UriUtils.checkUrlExistence(url);
|
||||
// Check that the resource limit for secondary storage won't be exceeded
|
||||
_resourceLimitMgr.checkResourceLimit(_accountMgr.getAccount(ownerId), ResourceType.secondary_storage, UriUtils.getRemoteSize(url));
|
||||
} else {
|
||||
_resourceLimitMgr.checkResourceLimit(_accountMgr.getAccount(ownerId), ResourceType.secondary_storage);
|
||||
}
|
||||
|
||||
// Check that the resource limit for secondary storage won't be exceeded
|
||||
_resourceLimitMgr.checkResourceLimit(_accountMgr.getAccount(ownerId), ResourceType.secondary_storage, UriUtils.getRemoteSize(url));
|
||||
try {
|
||||
ImageFormat.valueOf(format.toUpperCase());
|
||||
} catch (IllegalArgumentException e) {
|
||||
s_logger.debug("ImageFormat IllegalArgumentException: " + e.getMessage());
|
||||
throw new IllegalArgumentException("Image format: " + format + " is incorrect. Supported formats are " + EnumUtils.listValues(ImageFormat.values()));
|
||||
}
|
||||
|
||||
// Check that the disk offering specified is valid
|
||||
if (diskOfferingId != null) {
|
||||
|
|
@ -335,7 +432,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||
|
||||
@DB
|
||||
protected VolumeVO persistVolume(final Account owner, final Long zoneId, final String volumeName, final String url,
|
||||
final String format, final Long diskOfferingId) {
|
||||
final String format, final Long diskOfferingId, final Volume.State state) {
|
||||
return Transaction.execute(new TransactionCallback<VolumeVO>() {
|
||||
@Override
|
||||
public VolumeVO doInTransaction(TransactionStatus status) {
|
||||
|
|
@ -343,7 +440,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||
volume.setPoolId(null);
|
||||
volume.setDataCenterId(zoneId);
|
||||
volume.setPodId(null);
|
||||
// to prevent a nullpointer deref I put the system account id here when no owner is given.
|
||||
volume.setState(state); // initialize the state
|
||||
// to prevent a null pointer deref I put the system account id here when no owner is given.
|
||||
// TODO Decide if this is valid or whether throwing a CloudRuntimeException is more appropriate
|
||||
volume.setAccountId((owner == null) ? Account.ACCOUNT_ID_SYSTEM : owner.getAccountId());
|
||||
volume.setDomainId((owner == null) ? Domain.ROOT_DOMAIN : owner.getDomainId());
|
||||
|
|
@ -368,7 +466,10 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||
// Increment resource count during allocation; if actual creation fails,
|
||||
// decrement it
|
||||
_resourceLimitMgr.incrementResourceCount(volume.getAccountId(), ResourceType.volume);
|
||||
_resourceLimitMgr.incrementResourceCount(volume.getAccountId(), ResourceType.secondary_storage, UriUtils.getRemoteSize(url));
|
||||
// url can be null in case of post upload
|
||||
if(url!=null) {
|
||||
_resourceLimitMgr.incrementResourceCount(volume.getAccountId(), ResourceType.secondary_storage, UriUtils.getRemoteSize(url));
|
||||
}
|
||||
|
||||
return volume;
|
||||
}
|
||||
|
|
@ -1087,11 +1188,11 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||
|
||||
VolumeVO volume = _volsDao.findById(volumeId);
|
||||
if (volume == null) {
|
||||
throw new InvalidParameterValueException("Unable to aquire volume with ID: " + volumeId);
|
||||
throw new InvalidParameterValueException("Unable to find volume with ID: " + volumeId);
|
||||
}
|
||||
|
||||
if (!_snapshotMgr.canOperateOnVolume(volume)) {
|
||||
throw new InvalidParameterValueException("There are snapshot creating on it, Unable to delete the volume");
|
||||
throw new InvalidParameterValueException("There are snapshot operations in progress on the volume, unable to delete it");
|
||||
}
|
||||
|
||||
_accountMgr.checkAccess(caller, null, true, volume);
|
||||
|
|
@ -1107,6 +1208,10 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||
}
|
||||
}
|
||||
|
||||
if (volume.getState() == Volume.State.NotUploaded || volume.getState() == Volume.State.UploadInProgress) {
|
||||
throw new InvalidParameterValueException("The volume is either getting uploaded or it may be initiated shortly, please wait for it to be completed");
|
||||
}
|
||||
|
||||
try {
|
||||
if (volume.getState() != Volume.State.Destroy && volume.getState() != Volume.State.Expunging && volume.getState() != Volume.State.Expunging) {
|
||||
Long instanceId = volume.getInstanceId();
|
||||
@ -18,6 +18,7 @@ package com.cloud.template;
|
|||
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
|
|
@ -25,8 +26,15 @@ import java.util.concurrent.ExecutionException;
|
|||
import javax.ejb.Local;
|
||||
import javax.inject.Inject;
|
||||
|
||||
import com.cloud.configuration.Config;
|
||||
import com.cloud.utils.db.Transaction;
|
||||
import com.cloud.utils.db.TransactionCallback;
|
||||
import com.cloud.utils.db.TransactionStatus;
|
||||
import org.apache.cloudstack.api.command.user.template.GetUploadParamsForTemplateCmd;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
|
||||
import org.apache.cloudstack.storage.command.TemplateOrVolumePostUploadCommand;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
|
||||
import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd;
|
||||
import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd;
|
||||
|
|
@ -70,6 +78,7 @@ import com.cloud.storage.VMTemplateVO;
|
|||
import com.cloud.storage.VMTemplateZoneVO;
|
||||
import com.cloud.storage.dao.VMTemplateZoneDao;
|
||||
import com.cloud.storage.download.DownloadMonitor;
|
||||
import com.cloud.template.VirtualMachineTemplate.State;
|
||||
import com.cloud.user.Account;
|
||||
import com.cloud.utils.Pair;
|
||||
import com.cloud.utils.UriUtils;
|
||||
|
|
@ -134,10 +143,19 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase {
|
|||
return profile;
|
||||
}
|
||||
|
||||
@Override
|
||||
public TemplateProfile prepare(GetUploadParamsForTemplateCmd cmd) throws ResourceAllocationException {
|
||||
TemplateProfile profile = super.prepare(cmd);
|
||||
|
||||
// Check that the resource limit for secondary storage won't be exceeded
|
||||
_resourceLimitMgr.checkResourceLimit(_accountMgr.getAccount(cmd.getEntityOwnerId()), ResourceType.secondary_storage);
|
||||
return profile;
|
||||
}
|
||||
|
||||
@Override
|
||||
public VMTemplateVO create(TemplateProfile profile) {
|
||||
// persist an entry in the vm_template, vm_template_details and template_zone_ref tables; note that the entry in template_store_ref is not created here, but in createTemplateAsync.
|
||||
VMTemplateVO template = persistTemplate(profile);
|
||||
VMTemplateVO template = persistTemplate(profile, State.Active);
|
||||
|
||||
if (template == null) {
|
||||
throw new CloudRuntimeException("Unable to persist the template " + profile.getTemplate());
|
||||
|
|
@ -192,6 +210,86 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase {
|
|||
return template;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<TemplateOrVolumePostUploadCommand> createTemplateForPostUpload(final TemplateProfile profile) {
|
||||
// persist an entry in the vm_template, vm_template_details and template_zone_ref tables; note that the entry in template_store_ref is not created here, but in createTemplateAsync.
|
||||
return Transaction.execute(new TransactionCallback<List<TemplateOrVolumePostUploadCommand>>() {
|
||||
|
||||
@Override
|
||||
public List<TemplateOrVolumePostUploadCommand> doInTransaction(TransactionStatus status) {
|
||||
|
||||
VMTemplateVO template = persistTemplate(profile, State.NotUploaded);
|
||||
|
||||
if (template == null) {
|
||||
throw new CloudRuntimeException("Unable to persist the template " + profile.getTemplate());
|
||||
}
|
||||
|
||||
// find all eligible image stores for this zone scope
|
||||
List<DataStore> imageStores = storeMgr.getImageStoresByScope(new ZoneScope(profile.getZoneId()));
|
||||
if (imageStores == null || imageStores.size() == 0) {
|
||||
throw new CloudRuntimeException("Unable to find image store to download template " + profile.getTemplate());
|
||||
}
|
||||
|
||||
List<TemplateOrVolumePostUploadCommand> payloads = new LinkedList<>();
|
||||
Set<Long> zoneSet = new HashSet<Long>();
|
||||
Collections.shuffle(imageStores); // For private templates choose a random store. TODO - Have a better algorithm based on size, no. of objects, load etc.
|
||||
for (DataStore imageStore : imageStores) {
|
||||
// skip data stores for a disabled zone
|
||||
Long zoneId = imageStore.getScope().getScopeId();
|
||||
if (zoneId != null) {
|
||||
DataCenterVO zone = _dcDao.findById(zoneId);
|
||||
if (zone == null) {
|
||||
s_logger.warn("Unable to find zone by id " + zoneId + ", so skip downloading template to its image store " + imageStore.getId());
|
||||
continue;
|
||||
}
|
||||
|
||||
// Check if zone is disabled
|
||||
if (Grouping.AllocationState.Disabled == zone.getAllocationState()) {
|
||||
s_logger.info("Zone " + zoneId + " is disabled, so skip downloading template to its image store " + imageStore.getId());
|
||||
continue;
|
||||
}
|
||||
|
||||
// We want to download private template to one of the image store in a zone
|
||||
if (isPrivateTemplate(template) && zoneSet.contains(zoneId)) {
|
||||
continue;
|
||||
} else {
|
||||
zoneSet.add(zoneId);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
TemplateInfo tmpl = imageFactory.getTemplate(template.getId(), imageStore);
|
||||
//imageService.createTemplateAsync(tmpl, imageStore, caller);
|
||||
|
||||
// persist template_store_ref entry
|
||||
DataObject templateOnStore = imageStore.create(tmpl);
|
||||
// update template_store_ref and template state
|
||||
|
||||
EndPoint ep = _epSelector.select(templateOnStore);
|
||||
if (ep == null) {
|
||||
String errMsg = "There is no secondary storage VM for downloading template to image store " + imageStore.getName();
|
||||
s_logger.warn(errMsg);
|
||||
throw new CloudRuntimeException(errMsg);
|
||||
}
|
||||
|
||||
TemplateOrVolumePostUploadCommand payload = new TemplateOrVolumePostUploadCommand(template.getId(), template.getUuid(), tmpl.getInstallPath(), tmpl
|
||||
.getChecksum(), tmpl.getType().toString(), template.getUniqueName(), template.getFormat().toString(), templateOnStore.getDataStore().getUri(),
|
||||
templateOnStore.getDataStore().getRole().toString());
|
||||
//using the existing max template size configuration
|
||||
payload.setMaxUploadSize(_configDao.getValue(Config.MaxTemplateAndIsoSize.key()));
|
||||
payload.setDefaultMaxAccountSecondaryStorage(_configDao.getValue(Config.DefaultMaxAccountSecondaryStorage.key()));
|
||||
payload.setAccountId(template.getAccountId());
|
||||
payload.setRemoteEndPoint(ep.getPublicAddr());
|
||||
payload.setRequiresHvm(template.requiresHvm());
|
||||
payload.setDescription(template.getDisplayText());
|
||||
payloads.add(payload);
|
||||
}
|
||||
_resourceLimitMgr.incrementResourceCount(profile.getAccountId(), ResourceType.template);
|
||||
return payloads;
|
||||
}
|
||||
});
|
||||
}
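The shuffle-plus-zone-set logic above spreads uploads across image stores while sending a private template to only one store per zone. A toy illustration of that selection rule, using simplified placeholder types rather than CloudStack's DataStore/zone objects:

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

// Toy version of the store-selection rule in createTemplateForPostUpload():
// stores are shuffled, and for a private template only the first store
// encountered in each zone receives the upload command.
public class PrivateTemplateStorePick {
    public static List<Long> pickStores(Map<Long, Long> storeToZone, boolean privateTemplate) {
        List<Long> storeIds = new ArrayList<>(storeToZone.keySet());
        Collections.shuffle(storeIds);                 // spread load across image stores
        Set<Long> zonesCovered = new HashSet<>();
        List<Long> chosen = new ArrayList<>();
        for (Long storeId : storeIds) {
            Long zoneId = storeToZone.get(storeId);
            if (privateTemplate && !zonesCovered.add(zoneId)) {
                continue;                              // private template: one store per zone is enough
            }
            chosen.add(storeId);
        }
        return chosen;
    }

    public static void main(String[] args) {
        Map<Long, Long> storeToZone = new HashMap<>();
        storeToZone.put(101L, 1L);
        storeToZone.put(102L, 1L);
        storeToZone.put(201L, 2L);
        System.out.println(pickStores(storeToZone, true)); // one store from zone 1, one from zone 2
    }
}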
|
||||
|
||||
private boolean isPrivateTemplate(VMTemplateVO template){
|
||||
|
||||
// if public OR featured OR system template
|
||||
@ -16,12 +16,14 @@
|
|||
// under the License.
|
||||
package com.cloud.template;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd;
|
||||
import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd;
|
||||
import org.apache.cloudstack.api.command.user.template.DeleteTemplateCmd;
|
||||
import org.apache.cloudstack.api.command.user.template.ExtractTemplateCmd;
|
||||
import org.apache.cloudstack.api.command.user.template.GetUploadParamsForTemplateCmd;
|
||||
import org.apache.cloudstack.api.command.user.template.RegisterTemplateCmd;
|
||||
|
||||
import com.cloud.exception.ResourceAllocationException;
|
||||
|
|
@ -31,6 +33,7 @@ import com.cloud.storage.TemplateProfile;
|
|||
import com.cloud.storage.VMTemplateVO;
|
||||
import com.cloud.user.Account;
|
||||
import com.cloud.utils.component.Adapter;
|
||||
import org.apache.cloudstack.storage.command.TemplateOrVolumePostUploadCommand;
|
||||
|
||||
public interface TemplateAdapter extends Adapter {
|
||||
public static class TemplateAdapterType {
|
||||
|
|
@ -50,10 +53,14 @@ public interface TemplateAdapter extends Adapter {
|
|||
|
||||
public TemplateProfile prepare(RegisterTemplateCmd cmd) throws ResourceAllocationException;
|
||||
|
||||
public TemplateProfile prepare(GetUploadParamsForTemplateCmd cmd) throws ResourceAllocationException;
|
||||
|
||||
public TemplateProfile prepare(RegisterIsoCmd cmd) throws ResourceAllocationException;
|
||||
|
||||
public VMTemplateVO create(TemplateProfile profile);
|
||||
|
||||
public List<TemplateOrVolumePostUploadCommand> createTemplateForPostUpload(TemplateProfile profile);
|
||||
|
||||
public TemplateProfile prepareDelete(DeleteTemplateCmd cmd);
|
||||
|
||||
public TemplateProfile prepareDelete(DeleteIsoCmd cmd);
|
||||
@ -22,6 +22,7 @@ import java.util.Map;
|
|||
|
||||
import javax.inject.Inject;
|
||||
|
||||
import org.apache.cloudstack.api.command.user.template.GetUploadParamsForTemplateCmd;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import org.apache.cloudstack.api.ApiConstants;
|
||||
|
|
@ -182,7 +183,8 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat
|
|||
throw new InvalidParameterValueException("Please specify a valid zone Id. Only admins can create templates in all zones.");
|
||||
}
|
||||
|
||||
if (url.toLowerCase().contains("file://")) {
|
||||
// check the url format only when the url is not null; the url can be null in case of a form-based upload
|
||||
if (url != null && url.toLowerCase().contains("file://")) {
|
||||
throw new InvalidParameterValueException("File:// type urls are currently unsupported");
|
||||
}
|
||||
|
||||
|
|
@ -196,9 +198,12 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat
|
|||
featured = Boolean.FALSE;
|
||||
}
|
||||
|
||||
ImageFormat imgfmt = ImageFormat.valueOf(format.toUpperCase());
|
||||
if (imgfmt == null) {
|
||||
throw new IllegalArgumentException("Image format is incorrect " + format + ". Supported formats are " + EnumUtils.listValues(ImageFormat.values()));
|
||||
ImageFormat imgfmt;
|
||||
try {
|
||||
imgfmt = ImageFormat.valueOf(format.toUpperCase());
|
||||
} catch (IllegalArgumentException e) {
|
||||
s_logger.debug("ImageFormat IllegalArgumentException: " + e.getMessage());
|
||||
throw new IllegalArgumentException("Image format: " + format + " is incorrect. Supported formats are " + EnumUtils.listValues(ImageFormat.values()));
|
||||
}
|
||||
|
||||
// Check that the resource limit for templates/ISOs won't be exceeded
|
||||
|
|
@ -272,6 +277,29 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat
|
|||
|
||||
}
|
||||
|
||||
@Override
|
||||
public TemplateProfile prepare(GetUploadParamsForTemplateCmd cmd) throws ResourceAllocationException {
|
||||
//check if the caller can operate with the template owner
|
||||
Account caller = CallContext.current().getCallingAccount();
|
||||
Account owner = _accountMgr.getAccount(cmd.getEntityOwnerId());
|
||||
_accountMgr.checkAccess(caller, null, true, owner);
|
||||
|
||||
boolean isRouting = (cmd.isRoutingType() == null) ? false : cmd.isRoutingType();
|
||||
|
||||
Long zoneId = cmd.getZoneId();
|
||||
// ignore passed zoneId if we are using region wide image store
|
||||
List<ImageStoreVO> stores = _imgStoreDao.findRegionImageStores();
|
||||
if (stores != null && stores.size() > 0) {
|
||||
zoneId = -1L;
|
||||
}
|
||||
|
||||
return prepare(false, CallContext.current().getCallingUserId(), cmd.getName(), cmd.getDisplayText(), cmd.getBits(), cmd.isPasswordEnabled(),
|
||||
cmd.getRequiresHvm(), null, cmd.isPublic(), cmd.isFeatured(), cmd.isExtractable(), cmd.getFormat(), cmd.getOsTypeId(), zoneId,
|
||||
HypervisorType.getType(cmd.getHypervisor()), cmd.getChecksum(), true, cmd.getTemplateTag(), owner, cmd.getDetails(), cmd.isSshKeyEnabled(), null,
|
||||
cmd.isDynamicallyScalable(), isRouting ? TemplateType.ROUTING : TemplateType.USER);
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public TemplateProfile prepare(RegisterIsoCmd cmd) throws ResourceAllocationException {
|
||||
//check if the caller can operate with the template owner
|
||||
|
|
@ -291,13 +319,14 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat
|
|||
owner, null, false, cmd.getImageStoreUuid(), cmd.isDynamicallyScalable(), TemplateType.USER);
|
||||
}
|
||||
|
||||
protected VMTemplateVO persistTemplate(TemplateProfile profile) {
|
||||
protected VMTemplateVO persistTemplate(TemplateProfile profile, VirtualMachineTemplate.State initialState) {
|
||||
Long zoneId = profile.getZoneId();
|
||||
VMTemplateVO template =
|
||||
new VMTemplateVO(profile.getTemplateId(), profile.getName(), profile.getFormat(), profile.getIsPublic(), profile.getFeatured(), profile.getIsExtractable(),
|
||||
profile.getTemplateType(), profile.getUrl(), profile.getRequiresHVM(), profile.getBits(), profile.getAccountId(), profile.getCheckSum(),
|
||||
profile.getDisplayText(), profile.getPasswordEnabled(), profile.getGuestOsId(), profile.getBootable(), profile.getHypervisorType(),
|
||||
profile.getTemplateTag(), profile.getDetails(), profile.getSshKeyEnabled(), profile.IsDynamicallyScalable());
|
||||
template.setState(initialState);
|
||||
|
||||
if (zoneId == null || zoneId.longValue() == -1) {
|
||||
List<DataCenterVO> dcs = _dcDao.listAll();
|
||||
@ -16,12 +16,15 @@
|
|||
// under the License.
|
||||
package com.cloud.template;
|
||||
|
||||
import java.net.MalformedURLException;
|
||||
import java.net.URISyntaxException;
|
||||
import java.net.URL;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Date;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.UUID;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
|
|
@ -30,8 +33,17 @@ import javax.ejb.Local;
|
|||
import javax.inject.Inject;
|
||||
import javax.naming.ConfigurationException;
|
||||
|
||||
import org.apache.log4j.Logger;
|
||||
import com.cloud.storage.ImageStoreUploadMonitorImpl;
|
||||
import com.cloud.utils.EncryptionUtil;
|
||||
import com.google.gson.Gson;
|
||||
import com.google.gson.GsonBuilder;
|
||||
|
||||
import org.apache.cloudstack.api.command.user.template.GetUploadParamsForTemplateCmd;
|
||||
import org.apache.cloudstack.api.response.GetUploadParamsResponse;
|
||||
import org.apache.cloudstack.storage.command.TemplateOrVolumePostUploadCommand;
|
||||
import org.apache.cloudstack.utils.imagestore.ImageStoreUtil;
|
||||
import org.apache.commons.collections.CollectionUtils;
|
||||
import org.apache.log4j.Logger;
|
||||
import org.apache.cloudstack.acl.SecurityChecker.AccessType;
|
||||
import org.apache.cloudstack.api.ApiConstants;
|
||||
import org.apache.cloudstack.api.BaseListTemplateOrIsoPermissionsCmd;
|
||||
|
|
@ -179,6 +191,9 @@ import com.cloud.vm.VirtualMachineProfile;
|
|||
import com.cloud.vm.dao.UserVmDao;
|
||||
import com.cloud.vm.dao.VMInstanceDao;
|
||||
|
||||
import org.joda.time.DateTime;
|
||||
import org.joda.time.DateTimeZone;
|
||||
|
||||
@Local(value = {TemplateManager.class, TemplateApiService.class})
|
||||
public class TemplateManagerImpl extends ManagerBase implements TemplateManager, TemplateApiService, Configurable {
|
||||
private final static Logger s_logger = Logger.getLogger(TemplateManagerImpl.class);
|
||||
|
|
@ -319,6 +334,61 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager,
|
|||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
@ActionEvent(eventType = EventTypes.EVENT_TEMPLATE_CREATE, eventDescription = "creating post upload template")
|
||||
public GetUploadParamsResponse registerTemplateForPostUpload(GetUploadParamsForTemplateCmd cmd) throws ResourceAllocationException, MalformedURLException {
|
||||
TemplateAdapter adapter = getAdapter(HypervisorType.getType(cmd.getHypervisor()));
|
||||
TemplateProfile profile = adapter.prepare(cmd);
|
||||
List<TemplateOrVolumePostUploadCommand> payload = adapter.createTemplateForPostUpload(profile);
|
||||
|
||||
if(CollectionUtils.isNotEmpty(payload)) {
|
||||
GetUploadParamsResponse response = new GetUploadParamsResponse();
|
||||
|
||||
/*
|
||||
* There can be one or more commands depending on the number of secondary stores the template needs to go to. Taking the first one to do the url upload. The
|
||||
* template will be propagated to the rest through copy by management server commands.
|
||||
*/
|
||||
TemplateOrVolumePostUploadCommand firstCommand = payload.get(0);
|
||||
|
||||
String ssvmUrlDomain = _configDao.getValue(Config.SecStorageSecureCopyCert.key());
|
||||
|
||||
String url = ImageStoreUtil.generatePostUploadUrl(ssvmUrlDomain, firstCommand.getRemoteEndPoint(), firstCommand.getEntityUUID());
|
||||
response.setPostURL(new URL(url));
|
||||
|
||||
// set the post url, this is used in the monitoring thread to determine the SSVM
|
||||
TemplateDataStoreVO templateStore = _tmplStoreDao.findByTemplate(firstCommand.getEntityId(), DataStoreRole.getRole(firstCommand.getDataToRole()));
|
||||
if (templateStore != null) {
|
||||
templateStore.setExtractUrl(url);
|
||||
_tmplStoreDao.persist(templateStore);
|
||||
}
|
||||
|
||||
response.setId(UUID.fromString(firstCommand.getEntityUUID()));
|
||||
|
||||
int timeout = ImageStoreUploadMonitorImpl.getUploadOperationTimeout();
|
||||
DateTime currentDateTime = new DateTime(DateTimeZone.UTC);
|
||||
String expires = currentDateTime.plusMinutes(timeout).toString();
|
||||
response.setTimeout(expires);
|
||||
|
||||
String key = _configDao.getValue(Config.SSVMPSK.key());
|
||||
/*
|
||||
* encoded metadata using the post upload config key
|
||||
*/
|
||||
Gson gson = new GsonBuilder().create();
|
||||
String metadata = EncryptionUtil.encodeData(gson.toJson(firstCommand), key);
|
||||
response.setMetadata(metadata);
|
||||
|
||||
/*
|
||||
* signature calculated on the url, expiry, metadata.
|
||||
*/
|
||||
response.setSignature(EncryptionUtil.generateSignature(metadata + url + expires, key));
|
||||
|
||||
return response;
|
||||
} else {
|
||||
throw new CloudRuntimeException("Unable to register template.");
|
||||
}
|
||||
}
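A sketch of the matching check on the receiving side, reusing the same EncryptionUtil call and the metadata + url + expires ordering used above. How the pre-shared key and the request headers reach this point is assumed here, not shown in the diff:

import com.cloud.utils.EncryptionUtil;

// Recomputes the signature the way registerTemplateForPostUpload() created it
// and compares it with the X-signature header sent by the client.
// A constant-time comparison would be preferable in production; equals()
// keeps the sketch short.
public class PostUploadSignatureCheck {
    public static boolean isValid(String metadata, String url, String expires, String signatureHeader, String postUploadKey) {
        String expected = EncryptionUtil.generateSignature(metadata + url + expires, postUploadKey);
        return expected != null && expected.equals(signatureHeader);
    }
}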
|
||||
|
||||
|
||||
@Override
|
||||
public DataStore getImageStore(String storeUuid, Long zoneId) {
|
||||
DataStore imageStore = null;
|
||||
@ -30,6 +30,7 @@
|
|||
<dependency>
|
||||
<groupId>commons-io</groupId>
|
||||
<artifactId>commons-io</artifactId>
|
||||
<version>${cs.commons-io.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>net.sf.ehcache</groupId>
|
||||
@ -310,6 +310,10 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar
|
|||
setupCmd = new SecStorageSetupCommand(ssStore.getTO(), secUrl, certs);
|
||||
}
|
||||
|
||||
//template/volume file upload key
|
||||
String postUploadKey = _configDao.getValue(Config.SSVMPSK.key());
|
||||
setupCmd.setPostUploadKey(postUploadKey);
|
||||
|
||||
Answer answer = _agentMgr.easySend(ssHostId, setupCmd);
|
||||
if (answer != null && answer.getResult()) {
|
||||
SecStorageSetupAnswer an = (SecStorageSetupAnswer)answer;
|
||||
@ -53,6 +53,13 @@
|
|||
<artifactId>cloud-server</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
<!-- dependencies for starting a post upload server on ssvm -->
|
||||
<dependency>
|
||||
<groupId>io.netty</groupId>
|
||||
<artifactId>netty-all</artifactId>
|
||||
<version>4.0.25.Final</version>
|
||||
<scope>compile</scope>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
<build>
|
||||
<plugins>
|
||||
@ -0,0 +1,295 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package org.apache.cloudstack.storage.resource;
|
||||
|
||||
import static io.netty.buffer.Unpooled.copiedBuffer;
|
||||
import static io.netty.handler.codec.http.HttpHeaders.Names.CONNECTION;
|
||||
import static io.netty.handler.codec.http.HttpHeaders.Names.CONTENT_LENGTH;
|
||||
import static io.netty.handler.codec.http.HttpHeaders.Names.CONTENT_TYPE;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.URI;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Map.Entry;
|
||||
|
||||
import org.apache.cloudstack.storage.template.UploadEntity;
|
||||
import org.apache.cloudstack.utils.imagestore.ImageStoreUtil;
|
||||
import org.apache.commons.lang.StringUtils;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.cloud.exception.InvalidParameterValueException;
|
||||
|
||||
import io.netty.buffer.ByteBuf;
|
||||
import io.netty.channel.Channel;
|
||||
import io.netty.channel.ChannelFuture;
|
||||
import io.netty.channel.ChannelFutureListener;
|
||||
import io.netty.channel.ChannelHandlerContext;
|
||||
import io.netty.channel.SimpleChannelInboundHandler;
|
||||
import io.netty.handler.codec.http.DefaultFullHttpResponse;
|
||||
import io.netty.handler.codec.http.FullHttpResponse;
|
||||
import io.netty.handler.codec.http.HttpContent;
|
||||
import io.netty.handler.codec.http.HttpHeaders;
|
||||
import io.netty.handler.codec.http.HttpMethod;
|
||||
import io.netty.handler.codec.http.HttpObject;
|
||||
import io.netty.handler.codec.http.HttpRequest;
|
||||
import io.netty.handler.codec.http.HttpResponseStatus;
|
||||
import io.netty.handler.codec.http.HttpVersion;
|
||||
import io.netty.handler.codec.http.LastHttpContent;
|
||||
import io.netty.handler.codec.http.QueryStringDecoder;
|
||||
import io.netty.handler.codec.http.multipart.DefaultHttpDataFactory;
|
||||
import io.netty.handler.codec.http.multipart.DiskFileUpload;
|
||||
import io.netty.handler.codec.http.multipart.FileUpload;
|
||||
import io.netty.handler.codec.http.multipart.HttpDataFactory;
|
||||
import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder;
|
||||
import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.ErrorDataDecoderException;
|
||||
import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.IncompatibleDataDecoderException;
|
||||
import io.netty.handler.codec.http.multipart.InterfaceHttpData;
|
||||
import io.netty.handler.codec.http.multipart.InterfaceHttpData.HttpDataType;
|
||||
import io.netty.util.CharsetUtil;
|
||||
|
||||
public class HttpUploadServerHandler extends SimpleChannelInboundHandler<HttpObject> {
|
||||
private static final Logger logger = Logger.getLogger(HttpUploadServerHandler.class.getName());
|
||||
|
||||
private static final HttpDataFactory factory = new DefaultHttpDataFactory(true);
|
||||
|
||||
private final StringBuilder responseContent = new StringBuilder();
|
||||
|
||||
private HttpRequest request;
|
||||
|
||||
private HttpPostRequestDecoder decoder;
|
||||
|
||||
private NfsSecondaryStorageResource storageResource;
|
||||
|
||||
private String uuid;
|
||||
|
||||
private boolean requestProcessed = false;
|
||||
|
||||
private static final String HEADER_SIGNATURE = "X-signature";
|
||||
|
||||
private static final String HEADER_METADATA = "X-metadata";
|
||||
|
||||
private static final String HEADER_EXPIRES = "X-expires";
|
||||
|
||||
private static final String HEADER_HOST = "X-Forwarded-Host";
|
||||
|
||||
public HttpUploadServerHandler(NfsSecondaryStorageResource storageResource) {
|
||||
this.storageResource = storageResource;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void channelUnregistered(ChannelHandlerContext ctx) throws Exception {
|
||||
if (decoder != null) {
|
||||
decoder.cleanFiles();
|
||||
}
|
||||
requestProcessed = false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void channelInactive(ChannelHandlerContext ctx) throws Exception {
|
||||
if (!requestProcessed) {
|
||||
String message = "file receive failed or connection closed prematurely.";
|
||||
logger.error(message);
|
||||
storageResource.updateStateMapWithError(uuid, message);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void channelRead0(ChannelHandlerContext ctx, HttpObject msg) throws Exception {
|
||||
if (msg instanceof HttpRequest) {
|
||||
HttpRequest request = this.request = (HttpRequest) msg;
|
||||
responseContent.setLength(0);
|
||||
|
||||
if (request.getMethod().equals(HttpMethod.POST)) {
|
||||
|
||||
URI uri = new URI(request.getUri());
|
||||
|
||||
String signature = null;
|
||||
String expires = null;
|
||||
String metadata = null;
|
||||
String hostname = null;
|
||||
long contentLength = 0;
|
||||
|
||||
for (Entry<String, String> entry : request.headers()) {
|
||||
switch (entry.getKey()) {
|
||||
case HEADER_SIGNATURE:
|
||||
signature = entry.getValue();
|
||||
break;
|
||||
case HEADER_METADATA:
|
||||
metadata = entry.getValue();
|
||||
break;
|
||||
case HEADER_EXPIRES:
|
||||
expires = entry.getValue();
|
||||
break;
|
||||
case HEADER_HOST:
|
||||
hostname = entry.getValue();
|
||||
break;
|
||||
case HttpHeaders.Names.CONTENT_LENGTH:
|
||||
contentLength = Long.valueOf(entry.getValue());
|
||||
break;
|
||||
}
|
||||
}
|
||||
logger.info("HEADER: signature=" + signature);
|
||||
logger.info("HEADER: metadata=" + metadata);
|
||||
logger.info("HEADER: expires=" + expires);
|
||||
logger.info("HEADER: hostname=" + hostname);
|
||||
logger.info("HEADER: Content-Length=" + contentLength);
|
||||
QueryStringDecoder decoderQuery = new QueryStringDecoder(uri);
|
||||
Map<String, List<String>> uriAttributes = decoderQuery.parameters();
|
||||
uuid = uriAttributes.get("uuid").get(0);
|
||||
logger.info("URI: uuid=" + uuid);
|
||||
|
||||
UploadEntity uploadEntity = null;
|
||||
try {
|
||||
// Validate the request here
|
||||
storageResource.validatePostUploadRequest(signature, metadata, expires, hostname, contentLength, uuid);
|
||||
//create an upload entity. This will fail if entity already exists.
|
||||
uploadEntity = storageResource.createUploadEntity(uuid, metadata, contentLength);
|
||||
} catch (InvalidParameterValueException ex) {
|
||||
logger.error("post request validation failed", ex);
|
||||
responseContent.append(ex.getMessage());
|
||||
writeResponse(ctx.channel(), HttpResponseStatus.BAD_REQUEST);
|
||||
requestProcessed = true;
|
||||
return;
|
||||
}
|
||||
if (uploadEntity == null) {
|
||||
logger.error("Unable to create upload entity. An exception occurred.");
|
||||
responseContent.append("Internal Server Error");
|
||||
writeResponse(ctx.channel(), HttpResponseStatus.INTERNAL_SERVER_ERROR);
|
||||
requestProcessed = true;
|
||||
return;
|
||||
}
|
||||
//set the base directory to download the file
|
||||
DiskFileUpload.baseDirectory = uploadEntity.getInstallPathPrefix();
|
||||
logger.info("base directory: " + DiskFileUpload.baseDirectory);
|
||||
try {
|
||||
//initialize the decoder
|
||||
decoder = new HttpPostRequestDecoder(factory, request);
|
||||
} catch (ErrorDataDecoderException | IncompatibleDataDecoderException e) {
|
||||
logger.error("exception while initialising the decoder", e);
|
||||
responseContent.append(e.getMessage());
|
||||
writeResponse(ctx.channel(), HttpResponseStatus.INTERNAL_SERVER_ERROR);
|
||||
requestProcessed = true;
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
logger.warn("received a get request");
|
||||
responseContent.append("only post requests are allowed");
|
||||
writeResponse(ctx.channel(), HttpResponseStatus.BAD_REQUEST);
|
||||
requestProcessed = true;
|
||||
return;
|
||||
}
|
||||
|
||||
}
|
||||
// check if the decoder was constructed before
|
||||
if (decoder != null) {
|
||||
if (msg instanceof HttpContent) {
|
||||
// New chunk is received
|
||||
HttpContent chunk = (HttpContent) msg;
|
||||
try {
|
||||
decoder.offer(chunk);
|
||||
} catch (ErrorDataDecoderException e) {
|
||||
logger.error("data decoding exception", e);
|
||||
responseContent.append(e.getMessage());
|
||||
writeResponse(ctx.channel(), HttpResponseStatus.INTERNAL_SERVER_ERROR);
|
||||
requestProcessed = true;
|
||||
return;
|
||||
}
|
||||
if (chunk instanceof LastHttpContent) {
|
||||
writeResponse(ctx.channel(), readFileUploadData());
|
||||
reset();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private void reset() {
|
||||
request = null;
|
||||
// destroy the decoder to release all resources
|
||||
decoder.destroy();
|
||||
decoder = null;
|
||||
}
|
||||
|
||||
private HttpResponseStatus readFileUploadData() throws IOException {
|
||||
while (decoder.hasNext()) {
|
||||
InterfaceHttpData data = decoder.next();
|
||||
if (data != null) {
|
||||
try {
|
||||
logger.info("BODY FileUpload: " + data.getHttpDataType().name() + ": " + data);
|
||||
if (data.getHttpDataType() == HttpDataType.FileUpload) {
|
||||
FileUpload fileUpload = (FileUpload) data;
|
||||
if (fileUpload.isCompleted()) {
|
||||
requestProcessed = true;
|
||||
String format = ImageStoreUtil.checkTemplateFormat(fileUpload.getFile().getAbsolutePath(), fileUpload.getFilename());
|
||||
if(StringUtils.isNotBlank(format)) {
|
||||
String errorString = "File type mismatch between the sent file and the actual content. Received: " + format;
|
||||
logger.error(errorString);
|
||||
responseContent.append(errorString);
|
||||
storageResource.updateStateMapWithError(uuid, errorString);
|
||||
return HttpResponseStatus.BAD_REQUEST;
|
||||
}
|
||||
String status = storageResource.postUpload(uuid, fileUpload.getFile().getName());
|
||||
if (status != null) {
|
||||
responseContent.append(status);
|
||||
storageResource.updateStateMapWithError(uuid, status);
|
||||
return HttpResponseStatus.INTERNAL_SERVER_ERROR;
|
||||
} else {
|
||||
responseContent.append("upload successful.");
|
||||
return HttpResponseStatus.OK;
|
||||
}
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
data.release();
|
||||
}
|
||||
}
|
||||
}
|
||||
responseContent.append("received entity is not a file");
|
||||
return HttpResponseStatus.UNPROCESSABLE_ENTITY;
|
||||
}
|
||||
|
||||
private void writeResponse(Channel channel, HttpResponseStatus statusCode) {
|
||||
// Convert the response content to a ChannelBuffer.
|
||||
ByteBuf buf = copiedBuffer(responseContent.toString(), CharsetUtil.UTF_8);
|
||||
responseContent.setLength(0);
|
||||
// Decide whether to close the connection or not.
|
||||
boolean close = HttpHeaders.Values.CLOSE.equalsIgnoreCase(request.headers().get(CONNECTION)) ||
|
||||
request.getProtocolVersion().equals(HttpVersion.HTTP_1_0) && !HttpHeaders.Values.KEEP_ALIVE.equalsIgnoreCase(request.headers().get(CONNECTION));
|
||||
// Build the response object.
|
||||
FullHttpResponse response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, statusCode, buf);
|
||||
response.headers().set(CONTENT_TYPE, "text/plain; charset=UTF-8");
|
||||
if (!close) {
|
||||
// There's no need to add 'Content-Length' header if this is the last response.
|
||||
response.headers().set(CONTENT_LENGTH, buf.readableBytes());
|
||||
}
|
||||
// Write the response.
|
||||
ChannelFuture future = channel.writeAndFlush(response);
|
||||
// Close the connection after the write operation is done if necessary.
|
||||
if (close) {
|
||||
future.addListener(ChannelFutureListener.CLOSE);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
|
||||
logger.warn(responseContent.toString(), cause);
|
||||
responseContent.append("\r\nException occurred: ").append(cause.getMessage());
|
||||
writeResponse(ctx.channel(), HttpResponseStatus.INTERNAL_SERVER_ERROR);
|
||||
ctx.channel().close();
|
||||
}
|
||||
}
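For reference, a minimal client-side sketch (not part of the patch) of the request shape HttpUploadServerHandler expects: a multipart/form-data POST to the SSVM's /upload/<uuid> endpoint carrying the X-signature, X-metadata and X-expires headers. The host name, file path, uuid and header values below are placeholders; in practice they come from a getUploadParamsFor* API response.

import java.io.File;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.file.Files;

public class PostUploadClientSketch {
    public static void main(String[] args) throws Exception {
        File volumeFile = new File("/tmp/disk.qcow2");                 // placeholder upload file
        String uuid = "11111111-2222-3333-4444-555555555555";          // placeholder entity uuid
        String boundary = "----cloudstack-upload-sketch";

        // the apache rewrite rule on the SSVM proxies /upload/<uuid> to 127.0.0.1:8210/upload?uuid=<uuid>
        URL url = new URL("https://ssvm.example.org/upload/" + uuid);  // placeholder SSVM host
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("POST");
        conn.setDoOutput(true);
        // headers checked by validatePostUploadRequest() on the SSVM
        conn.setRequestProperty("X-signature", "<signature from getUploadParamsFor* response>");
        conn.setRequestProperty("X-metadata", "<metadata from getUploadParamsFor* response>");
        conn.setRequestProperty("X-expires", "<expires from getUploadParamsFor* response>");
        conn.setRequestProperty("Content-Type", "multipart/form-data; boundary=" + boundary);

        try (OutputStream out = conn.getOutputStream()) {
            out.write(("--" + boundary + "\r\n"
                    + "Content-Disposition: form-data; name=\"file\"; filename=\"" + volumeFile.getName() + "\"\r\n"
                    + "Content-Type: application/octet-stream\r\n\r\n").getBytes("UTF-8"));
            out.write(Files.readAllBytes(volumeFile.toPath()));        // fine for a sketch; stream large files instead
            out.write(("\r\n--" + boundary + "--\r\n").getBytes("UTF-8"));
        }
        System.out.println("HTTP status: " + conn.getResponseCode());
    }
}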
|
||||
|
|
@ -46,7 +46,31 @@ import java.util.UUID;
|
|||
|
||||
import javax.naming.ConfigurationException;
|
||||
|
||||
import com.cloud.exception.InvalidParameterValueException;
|
||||
import com.cloud.storage.Storage;
|
||||
import com.cloud.storage.template.TemplateConstants;
|
||||
import com.cloud.utils.EncryptionUtil;
|
||||
import com.google.gson.Gson;
|
||||
import com.google.gson.GsonBuilder;
|
||||
import io.netty.bootstrap.ServerBootstrap;
|
||||
import io.netty.channel.Channel;
|
||||
import io.netty.channel.ChannelInitializer;
|
||||
import io.netty.channel.ChannelPipeline;
|
||||
import io.netty.channel.EventLoopGroup;
|
||||
import io.netty.channel.nio.NioEventLoopGroup;
|
||||
import io.netty.channel.socket.SocketChannel;
|
||||
import io.netty.channel.socket.nio.NioServerSocketChannel;
|
||||
import io.netty.handler.codec.http.HttpContentCompressor;
|
||||
import io.netty.handler.codec.http.HttpRequestDecoder;
|
||||
import io.netty.handler.codec.http.HttpResponseEncoder;
|
||||
import io.netty.handler.logging.LogLevel;
|
||||
import io.netty.handler.logging.LoggingHandler;
|
||||
import org.apache.cloudstack.storage.command.TemplateOrVolumePostUploadCommand;
|
||||
import org.apache.cloudstack.storage.template.UploadEntity;
|
||||
import org.apache.cloudstack.utils.imagestore.ImageStoreUtil;
|
||||
import org.apache.commons.codec.digest.DigestUtils;
|
||||
import org.apache.commons.io.FileUtils;
|
||||
import org.apache.commons.io.FilenameUtils;
|
||||
import org.apache.commons.lang.StringUtils;
|
||||
import org.apache.http.HttpEntity;
|
||||
import org.apache.http.HttpResponse;
|
||||
|
|
@ -65,6 +89,9 @@ import org.apache.cloudstack.storage.command.CopyCommand;
|
|||
import org.apache.cloudstack.storage.command.DeleteCommand;
|
||||
import org.apache.cloudstack.storage.command.DownloadCommand;
|
||||
import org.apache.cloudstack.storage.command.DownloadProgressCommand;
|
||||
import org.apache.cloudstack.storage.command.UploadStatusAnswer;
|
||||
import org.apache.cloudstack.storage.command.UploadStatusAnswer.UploadStatus;
|
||||
import org.apache.cloudstack.storage.command.UploadStatusCommand;
|
||||
import org.apache.cloudstack.storage.template.DownloadManager;
|
||||
import org.apache.cloudstack.storage.template.DownloadManagerImpl;
|
||||
import org.apache.cloudstack.storage.template.DownloadManagerImpl.ZfsPathParser;
|
||||
|
|
@ -135,6 +162,8 @@ import com.cloud.utils.net.NetUtils;
|
|||
import com.cloud.utils.script.OutputInterpreter;
|
||||
import com.cloud.utils.script.Script;
|
||||
import com.cloud.vm.SecondaryStorageVm;
|
||||
import org.joda.time.DateTime;
|
||||
import org.joda.time.format.ISODateTimeFormat;
|
||||
|
||||
public class NfsSecondaryStorageResource extends ServerResourceBase implements SecondaryStorageResource {
|
||||
|
||||
|
|
@ -142,6 +171,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
|
||||
private static final String TEMPLATE_ROOT_DIR = "template/tmpl";
|
||||
private static final String VOLUME_ROOT_DIR = "volumes";
|
||||
private static final String POST_UPLOAD_KEY_LOCATION = "/etc/cloudstack/agent/ms-psk";
|
||||
|
||||
int _timeout;
|
||||
|
||||
|
|
@ -180,6 +210,8 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
protected String _parent = "/mnt/SecStorage";
|
||||
final private String _tmpltpp = "template.properties";
|
||||
protected String createTemplateFromSnapshotXenScript;
|
||||
private HashMap<String,UploadEntity> uploadEntityStateMap = new HashMap<String,UploadEntity>();
|
||||
private String _ssvmPSK = null;
|
||||
|
||||
public void setParentPath(String path) {
|
||||
_parent = path;
|
||||
|
|
@ -233,6 +265,8 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
return execute((CopyCommand)cmd);
|
||||
} else if (cmd instanceof DeleteCommand) {
|
||||
return execute((DeleteCommand)cmd);
|
||||
} else if (cmd instanceof UploadStatusCommand) {
|
||||
return execute((UploadStatusCommand)cmd);
|
||||
} else {
|
||||
return Answer.createUnsupportedCommandAnswer(cmd);
|
||||
}
|
||||
|
|
@ -1264,6 +1298,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
if (!_inSystemVM) {
|
||||
return new Answer(cmd, true, null);
|
||||
}
|
||||
Answer answer = null;
|
||||
DataStoreTO dStore = cmd.getDataStore();
|
||||
if (dStore instanceof NfsTO) {
|
||||
String secUrl = cmd.getSecUrl();
|
||||
|
|
@ -1277,17 +1312,69 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
configCerts(cmd.getCerts());
|
||||
|
||||
nfsIps.add(nfsHostIp);
|
||||
return new SecStorageSetupAnswer(dir);
|
||||
answer = new SecStorageSetupAnswer(dir);
|
||||
} catch (Exception e) {
|
||||
String msg = "GetRootDir for " + secUrl + " failed due to " + e.toString();
|
||||
s_logger.error(msg);
|
||||
return new Answer(cmd, false, msg);
|
||||
answer = new Answer(cmd, false, msg);
|
||||
|
||||
}
|
||||
} else {
|
||||
// TODO: what do we need to setup for S3/Swift, maybe need to mount
|
||||
// to some cache storage
|
||||
return new Answer(cmd, true, null);
|
||||
answer = new Answer(cmd, true, null);
|
||||
}
|
||||
|
||||
savePostUploadPSK(cmd.getPostUploadKey());
|
||||
startPostUploadServer();
|
||||
return answer;
|
||||
}
|
||||
|
||||
private void startPostUploadServer() {
|
||||
final int PORT = 8210;
|
||||
final int NO_OF_WORKERS = 15;
|
||||
final EventLoopGroup bossGroup = new NioEventLoopGroup(1);
|
||||
final EventLoopGroup workerGroup = new NioEventLoopGroup(NO_OF_WORKERS);
|
||||
final ServerBootstrap b = new ServerBootstrap();
|
||||
final NfsSecondaryStorageResource storageResource = this;
|
||||
b.group(bossGroup, workerGroup);
|
||||
b.channel(NioServerSocketChannel.class);
|
||||
b.handler(new LoggingHandler(LogLevel.INFO));
|
||||
b.childHandler(new ChannelInitializer<SocketChannel>() {
|
||||
@Override
|
||||
protected void initChannel(SocketChannel ch) throws Exception {
|
||||
ChannelPipeline pipeline = ch.pipeline();
|
||||
pipeline.addLast(new HttpRequestDecoder());
|
||||
pipeline.addLast(new HttpResponseEncoder());
|
||||
pipeline.addLast(new HttpContentCompressor());
|
||||
pipeline.addLast(new HttpUploadServerHandler(storageResource));
|
||||
}
|
||||
});
|
||||
new Thread() {
|
||||
@Override
|
||||
public void run() {
|
||||
try {
|
||||
Channel ch = b.bind(PORT).sync().channel();
|
||||
s_logger.info(String.format("Started post upload server on port %d with %d workers",PORT,NO_OF_WORKERS));
|
||||
ch.closeFuture().sync();
|
||||
} catch (InterruptedException e) {
|
||||
s_logger.info("Failed to start post upload server");
|
||||
s_logger.debug("Exception while starting post upload server", e);
|
||||
} finally {
|
||||
bossGroup.shutdownGracefully();
|
||||
workerGroup.shutdownGracefully();
|
||||
s_logger.info("shutting down post upload server");
|
||||
}
|
||||
}
|
||||
}.start();
|
||||
s_logger.info("created a thread to start post upload server");
|
||||
}
|
||||
|
||||
private void savePostUploadPSK(String psk) {
|
||||
try {
|
||||
FileUtils.writeStringToFile(new File(POST_UPLOAD_KEY_LOCATION),psk, "utf-8");
|
||||
} catch (IOException ex) {
|
||||
s_logger.debug("Failed to copy PSK to the file.", ex);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1591,6 +1678,32 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
return new Answer(cmd, success, result);
|
||||
}
|
||||
|
||||
private UploadStatusAnswer execute(UploadStatusCommand cmd) {
|
||||
String entityUuid = cmd.getEntityUuid();
|
||||
if (uploadEntityStateMap.containsKey(entityUuid)) {
|
||||
UploadEntity uploadEntity = uploadEntityStateMap.get(entityUuid);
|
||||
if (uploadEntity.getUploadState() == UploadEntity.Status.ERROR) {
|
||||
uploadEntityStateMap.remove(entityUuid);
|
||||
return new UploadStatusAnswer(cmd, UploadStatus.ERROR, uploadEntity.getErrorMessage());
|
||||
} else if (uploadEntity.getUploadState() == UploadEntity.Status.COMPLETED) {
|
||||
UploadStatusAnswer answer = new UploadStatusAnswer(cmd, UploadStatus.COMPLETED);
|
||||
answer.setVirtualSize(uploadEntity.getVirtualSize());
|
||||
answer.setInstallPath(uploadEntity.getTmpltPath());
|
||||
answer.setPhysicalSize(uploadEntity.getPhysicalSize());
|
||||
answer.setDownloadPercent(100);
|
||||
uploadEntityStateMap.remove(entityUuid);
|
||||
return answer;
|
||||
} else if (uploadEntity.getUploadState() == UploadEntity.Status.IN_PROGRESS) {
|
||||
UploadStatusAnswer answer = new UploadStatusAnswer(cmd, UploadStatus.IN_PROGRESS);
|
||||
long downloadedSize = FileUtils.sizeOfDirectory(new File(uploadEntity.getInstallPathPrefix()));
|
||||
int downloadPercent = (int) (100 * downloadedSize / uploadEntity.getContentLength());
|
||||
answer.setDownloadPercent(Math.min(downloadPercent, 100));
|
||||
return answer;
|
||||
}
|
||||
}
|
||||
return new UploadStatusAnswer(cmd, UploadStatus.UNKNOWN);
|
||||
}
|
||||
|
||||
protected GetStorageStatsAnswer execute(final GetStorageStatsCommand cmd) {
|
||||
DataStoreTO store = cmd.getStore();
|
||||
if (store instanceof S3TO || store instanceof SwiftTO) {
|
||||
|
|
@ -2479,4 +2592,318 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
super.fillNetworkInformation(cmd);
|
||||
}
|
||||
}
|
||||
|
||||
private String getScriptLocation(UploadEntity.ResourceType resourceType) {
|
||||
|
||||
String scriptsDir = (String) _params.get("template.scripts.dir");
|
||||
if (scriptsDir == null) {
|
||||
scriptsDir = "scripts/storage/secondary";
|
||||
}
|
||||
String scriptname = null;
|
||||
if (resourceType == UploadEntity.ResourceType.VOLUME) {
|
||||
scriptname = "createvolume.sh";
|
||||
} else if (resourceType == UploadEntity.ResourceType.TEMPLATE) {
|
||||
scriptname = "createtmplt.sh";
|
||||
} else {
|
||||
throw new InvalidParameterValueException("cannot find script for resource type: " + resourceType);
|
||||
}
|
||||
return Script.findScript(scriptsDir, scriptname);
|
||||
}
|
||||
|
||||
public UploadEntity createUploadEntity(String uuid, String metadata, long contentLength) {
|
||||
TemplateOrVolumePostUploadCommand cmd = getTemplateOrVolumePostUploadCmd(metadata);
|
||||
UploadEntity uploadEntity = null;
|
||||
if(cmd == null ){
|
||||
String errorMessage = "unable decode and deserialize metadata.";
|
||||
updateStateMapWithError(uuid, errorMessage);
|
||||
throw new InvalidParameterValueException(errorMessage);
|
||||
} else {
|
||||
uuid = cmd.getEntityUUID();
|
||||
if (isOneTimePostUrlUsed(cmd)) {
|
||||
uploadEntity = uploadEntityStateMap.get(uuid);
|
||||
StringBuilder errorMessage = new StringBuilder("The one time post url is already used");
|
||||
if (uploadEntity != null) {
|
||||
errorMessage.append(" and the upload is in ").append(uploadEntity.getUploadState()).append(" state.");
|
||||
}
|
||||
throw new InvalidParameterValueException(errorMessage.toString());
|
||||
}
|
||||
int maxSizeInGB = Integer.valueOf(cmd.getMaxUploadSize());
|
||||
int contentLengthInGB = getSizeInGB(contentLength);
|
||||
if (contentLengthInGB > maxSizeInGB) {
|
||||
String errorMessage = "Maximum file upload size exceeded. Content Length received: " + contentLengthInGB + "GB. Maximum allowed size: " + maxSizeInGB + "GB.";
|
||||
updateStateMapWithError(uuid, errorMessage);
|
||||
throw new InvalidParameterValueException(errorMessage);
|
||||
}
|
||||
checkSecondaryStorageResourceLimit(cmd, contentLengthInGB);
|
||||
try {
|
||||
String absolutePath = cmd.getAbsolutePath();
|
||||
uploadEntity = new UploadEntity(uuid, cmd.getEntityId(), UploadEntity.Status.IN_PROGRESS, cmd.getName(), absolutePath);
|
||||
uploadEntity.setMetaDataPopulated(true);
|
||||
uploadEntity.setResourceType(UploadEntity.ResourceType.valueOf(cmd.getType()));
|
||||
uploadEntity.setFormat(Storage.ImageFormat.valueOf(cmd.getImageFormat()));
|
||||
//relative path with out ssvm mount info.
|
||||
uploadEntity.setTemplatePath(absolutePath);
|
||||
String dataStoreUrl = cmd.getDataTo();
|
||||
String installPathPrefix = this.getRootDir(dataStoreUrl) + File.separator + absolutePath;
|
||||
uploadEntity.setInstallPathPrefix(installPathPrefix);
|
||||
uploadEntity.setHvm(cmd.getRequiresHvm());
|
||||
uploadEntity.setChksum(cmd.getChecksum());
|
||||
uploadEntity.setMaxSizeInGB(maxSizeInGB);
|
||||
uploadEntity.setDescription(cmd.getDescription());
|
||||
uploadEntity.setContentLength(contentLength);
|
||||
// create a install dir
|
||||
if (!_storage.exists(installPathPrefix)) {
|
||||
_storage.mkdir(installPathPrefix);
|
||||
}
|
||||
uploadEntityStateMap.put(uuid, uploadEntity);
|
||||
} catch (Exception e) {
|
||||
// upload entity will be null in case an exception occurs and the handler will not proceed.
|
||||
s_logger.error("exception occurred while creating upload entity ", e);
|
||||
updateStateMapWithError(uuid, e.getMessage());
|
||||
}
|
||||
}
|
||||
return uploadEntity;
|
||||
}
|
||||
|
||||
private synchronized void checkSecondaryStorageResourceLimit(TemplateOrVolumePostUploadCommand cmd, int contentLengthInGB) {
|
||||
String rootDir = this.getRootDir(cmd.getDataTo()) + File.separator;
|
||||
long accountId = cmd.getAccountId();
|
||||
|
||||
long accountTemplateDirSize = 0;
|
||||
File accountTemplateDir = new File(rootDir + getTemplatePathForAccount(accountId));
|
||||
if(accountTemplateDir.exists()) {
|
||||
// assign the result; the original statement discarded it, so the template directory size was never counted towards the limit
accountTemplateDirSize = FileUtils.sizeOfDirectory(accountTemplateDir);
|
||||
}
|
||||
long accountVolumeDirSize = 0;
|
||||
File accountVolumeDir = new File(rootDir + getVolumePathForAccount(accountId));
|
||||
if(accountVolumeDir.exists()) {
|
||||
accountVolumeDirSize = FileUtils.sizeOfDirectory(accountVolumeDir);
|
||||
}
|
||||
long accountSnapshotDirSize = 0;
|
||||
File accountSnapshotDir = new File(rootDir + getSnapshotPathForAccount(accountId));
|
||||
if(accountSnapshotDir.exists()) {
|
||||
accountSnapshotDirSize = FileUtils.sizeOfDirectory(accountSnapshotDir);
|
||||
}
|
||||
s_logger.debug("accountTemplateDirSize: " + accountTemplateDirSize + " accountSnapshotDirSize: " +accountSnapshotDirSize + " accountVolumeDirSize: " +
|
||||
accountVolumeDirSize);
|
||||
|
||||
int accountDirSizeInGB = getSizeInGB(accountTemplateDirSize + accountSnapshotDirSize + accountVolumeDirSize);
|
||||
int defaultMaxAccountSecondaryStorageInGB = Integer.parseInt(cmd.getDefaultMaxAccountSecondaryStorage());
|
||||
|
||||
if ((accountDirSizeInGB + contentLengthInGB) > defaultMaxAccountSecondaryStorageInGB) {
|
||||
s_logger.error("accountDirSizeInGb: " + accountDirSizeInGB + " defaultMaxAccountSecondaryStorageInGB: " + defaultMaxAccountSecondaryStorageInGB + " contentLengthInGB:"
|
||||
+ contentLengthInGB);
|
||||
String errorMessage = "Maximum number of resources of type secondary_storage for account has exceeded";
|
||||
updateStateMapWithError(cmd.getEntityUUID(), errorMessage);
|
||||
throw new InvalidParameterValueException(errorMessage);
|
||||
}
|
||||
}
|
||||
|
||||
private String getVolumePathForAccount(long accountId) {
|
||||
return TemplateConstants.DEFAULT_VOLUME_ROOT_DIR + "/" + accountId;
|
||||
}
|
||||
|
||||
private String getTemplatePathForAccount(long accountId) {
|
||||
return TemplateConstants.DEFAULT_TMPLT_ROOT_DIR + "/" + TemplateConstants.DEFAULT_TMPLT_FIRST_LEVEL_DIR + accountId;
|
||||
}
|
||||
|
||||
private String getSnapshotPathForAccount(long accountId) {
|
||||
return TemplateConstants.DEFAULT_SNAPSHOT_ROOT_DIR + "/" + accountId;
|
||||
}
|
||||
|
||||
private boolean isOneTimePostUrlUsed(TemplateOrVolumePostUploadCommand cmd) {
|
||||
String uuid = cmd.getEntityUUID();
|
||||
String uploadPath = this.getRootDir(cmd.getDataTo()) + File.separator + cmd.getAbsolutePath();
|
||||
return uploadEntityStateMap.containsKey(uuid) || new File(uploadPath).exists();
|
||||
}
|
||||
|
||||
private int getSizeInGB(long sizeInBytes) {
|
||||
return (int)Math.ceil(sizeInBytes * 1.0d / (1024 * 1024 * 1024));
|
||||
}
|
||||
|
||||
public String postUpload(String uuid, String filename) {
|
||||
UploadEntity uploadEntity = uploadEntityStateMap.get(uuid);
|
||||
int installTimeoutPerGig = 180 * 60 * 1000;
|
||||
|
||||
String resourcePath = uploadEntity.getInstallPathPrefix();
|
||||
String finalResourcePath = uploadEntity.getTmpltPath(); // template download
|
||||
UploadEntity.ResourceType resourceType = uploadEntity.getResourceType();
|
||||
|
||||
String fileSavedTempLocation = uploadEntity.getInstallPathPrefix() + "/" + filename;
|
||||
|
||||
String uploadedFileExtension = FilenameUtils.getExtension(filename);
|
||||
String userSelectedFormat= uploadEntity.getFormat().toString();
|
||||
if(uploadedFileExtension.equals("zip") || uploadedFileExtension.equals("bz2") || uploadedFileExtension.equals("gz")) {
|
||||
userSelectedFormat += "." + uploadedFileExtension;
|
||||
}
|
||||
String formatError = ImageStoreUtil.checkTemplateFormat(fileSavedTempLocation, userSelectedFormat);
|
||||
if(StringUtils.isNotBlank(formatError)) {
|
||||
String errorString = "File type mismatch between uploaded file and selected format. Selected file format: " + userSelectedFormat + ". Received: " + formatError;
|
||||
s_logger.error(errorString);
|
||||
return errorString;
|
||||
}
|
||||
|
||||
int imgSizeGigs = getSizeInGB(_storage.getSize(fileSavedTempLocation));
|
||||
int maxSize = uploadEntity.getMaxSizeInGB();
|
||||
if(imgSizeGigs > maxSize) {
|
||||
String errorMessage = "Maximum file upload size exceeded. Physical file size: " + imgSizeGigs + "GB. Maximum allowed size: " + maxSize + "GB.";
|
||||
s_logger.error(errorMessage);
|
||||
return errorMessage;
|
||||
}
|
||||
imgSizeGigs++; // add one just in case
|
||||
long timeout = (long)imgSizeGigs * installTimeoutPerGig;
|
||||
Script scr = new Script(getScriptLocation(resourceType), timeout, s_logger);
|
||||
scr.add("-s", Integer.toString(imgSizeGigs));
|
||||
scr.add("-S", Long.toString(UploadEntity.s_maxTemplateSize));
|
||||
if (uploadEntity.getDescription() != null && uploadEntity.getDescription().length() > 1) {
|
||||
scr.add("-d", uploadEntity.getDescription());
|
||||
}
|
||||
if (uploadEntity.isHvm()) {
|
||||
scr.add("-h");
|
||||
}
|
||||
String checkSum = uploadEntity.getChksum();
|
||||
if (StringUtils.isNotBlank(checkSum)) {
|
||||
scr.add("-c", checkSum);
|
||||
}
|
||||
|
||||
// add options common to ISO and template
|
||||
String extension = uploadEntity.getFormat().getFileExtension();
|
||||
String templateName = "";
|
||||
if (extension.equals("iso")) {
|
||||
templateName = uploadEntity.getUuid().trim().replace(" ", "_");
|
||||
} else {
|
||||
templateName = java.util.UUID.nameUUIDFromBytes((uploadEntity.getFilename() + System.currentTimeMillis()).getBytes()).toString();
|
||||
}
|
||||
|
||||
// run script to mv the temporary template file to the final template
|
||||
// file
|
||||
String templateFilename = templateName + "." + extension;
|
||||
uploadEntity.setTemplatePath(finalResourcePath + "/" + templateFilename);
|
||||
scr.add("-n", templateFilename);
|
||||
|
||||
scr.add("-t", resourcePath);
|
||||
scr.add("-f", fileSavedTempLocation); // this is the temporary
|
||||
// template file downloaded
|
||||
if (uploadEntity.getChksum() != null && uploadEntity.getChksum().length() > 1) {
|
||||
scr.add("-c", uploadEntity.getChksum());
|
||||
}
|
||||
scr.add("-u"); // cleanup
|
||||
String result;
|
||||
result = scr.execute();
|
||||
|
||||
if (result != null) {
|
||||
return result;
|
||||
}
|
||||
|
||||
// Set permissions for the downloaded template
|
||||
File downloadedTemplate = new File(resourcePath + "/" + templateFilename);
|
||||
_storage.setWorldReadableAndWriteable(downloadedTemplate);
|
||||
|
||||
// Set permissions for template/volume.properties
|
||||
String propertiesFile = resourcePath;
|
||||
if (resourceType == UploadEntity.ResourceType.TEMPLATE) {
|
||||
propertiesFile += "/template.properties";
|
||||
} else {
|
||||
propertiesFile += "/volume.properties";
|
||||
}
|
||||
File templateProperties = new File(propertiesFile);
|
||||
_storage.setWorldReadableAndWriteable(templateProperties);
|
||||
|
||||
TemplateLocation loc = new TemplateLocation(_storage, resourcePath);
|
||||
try {
|
||||
loc.create(uploadEntity.getEntityId(), true, uploadEntity.getFilename());
|
||||
} catch (IOException e) {
|
||||
s_logger.warn("Something is wrong with template location " + resourcePath, e);
|
||||
loc.purge();
|
||||
return "Unable to upload due to " + e.getMessage();
|
||||
}
|
||||
|
||||
Map<String, Processor> processors = _dlMgr.getProcessors();
|
||||
for (Processor processor : processors.values()) {
|
||||
FormatInfo info = null;
|
||||
try {
|
||||
info = processor.process(resourcePath, null, templateName);
|
||||
} catch (InternalErrorException e) {
|
||||
s_logger.error("Template process exception ", e);
|
||||
return e.toString();
|
||||
}
|
||||
if (info != null) {
|
||||
loc.addFormat(info);
|
||||
uploadEntity.setVirtualSize(info.virtualSize);
|
||||
uploadEntity.setPhysicalSize(info.size);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!loc.save()) {
|
||||
s_logger.warn("Cleaning up because we're unable to save the formats");
|
||||
loc.purge();
|
||||
}
|
||||
uploadEntity.setStatus(UploadEntity.Status.COMPLETED);
|
||||
uploadEntityStateMap.put(uploadEntity.getUuid(), uploadEntity);
|
||||
return null;
|
||||
}
|
||||
|
||||
private String getPostUploadPSK() {
|
||||
if(_ssvmPSK == null ) {
|
||||
try {
|
||||
_ssvmPSK = FileUtils.readFileToString(new File(POST_UPLOAD_KEY_LOCATION), "utf-8");
|
||||
} catch (IOException e) {
|
||||
s_logger.debug("Error while reading SSVM PSK from location " + POST_UPLOAD_KEY_LOCATION, e);
|
||||
}
|
||||
}
|
||||
return _ssvmPSK;
|
||||
}
|
||||
|
||||
public void updateStateMapWithError(String uuid,String errorMessage) {
|
||||
UploadEntity uploadEntity=null;
|
||||
if (uploadEntityStateMap.get(uuid)!=null) {
|
||||
uploadEntity=uploadEntityStateMap.get(uuid);
|
||||
}else {
|
||||
uploadEntity= new UploadEntity();
|
||||
}
|
||||
uploadEntity.setStatus(UploadEntity.Status.ERROR);
|
||||
uploadEntity.setErrorMessage(errorMessage);
|
||||
uploadEntityStateMap.put(uuid, uploadEntity);
|
||||
}
|
||||
|
||||
public void validatePostUploadRequest(String signature, String metadata, String timeout, String hostname,long contentLength, String uuid) throws InvalidParameterValueException{
|
||||
// check none of the params are empty
|
||||
if(StringUtils.isEmpty(signature) || StringUtils.isEmpty(metadata) || StringUtils.isEmpty(timeout)) {
|
||||
updateStateMapWithError(uuid,"signature, metadata and expires are compulsory fields.");
|
||||
throw new InvalidParameterValueException("signature, metadata and expires are compulsory fields.");
|
||||
}
|
||||
|
||||
//check that contentLength exists and is greater than zero
|
||||
if (contentLength <= 0) {
|
||||
throw new InvalidParameterValueException("content length is not set in the request or has invalid value.");
|
||||
}
|
||||
|
||||
//validate signature
|
||||
String fullUrl = "https://" + hostname + "/upload/" + uuid;
|
||||
String computedSignature = EncryptionUtil.generateSignature(metadata + fullUrl + timeout, getPostUploadPSK());
|
||||
boolean isSignatureValid = computedSignature.equals(signature);
|
||||
if(!isSignatureValid) {
|
||||
updateStateMapWithError(uuid,"signature validation failed.");
|
||||
throw new InvalidParameterValueException("signature validation failed.");
|
||||
}
|
||||
|
||||
//validate timeout
|
||||
DateTime timeoutDateTime = DateTime.parse(timeout, ISODateTimeFormat.dateTime());
|
||||
if(timeoutDateTime.isBeforeNow()) {
|
||||
updateStateMapWithError(uuid,"request not valid anymore.");
|
||||
throw new InvalidParameterValueException("request not valid anymore.");
|
||||
}
|
||||
}
|
||||
|
||||
private TemplateOrVolumePostUploadCommand getTemplateOrVolumePostUploadCmd(String metadata) {
|
||||
TemplateOrVolumePostUploadCommand cmd = null;
|
||||
try {
|
||||
Gson gson = new GsonBuilder().create();
|
||||
cmd = gson.fromJson(EncryptionUtil.decodeData(metadata, getPostUploadPSK()), TemplateOrVolumePostUploadCommand.class);
|
||||
} catch(Exception ex) {
|
||||
s_logger.error("exception while decoding and deserialising metadata", ex);
|
||||
}
|
||||
return cmd;
|
||||
}
|
||||
}
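A small sketch (not part of the patch) of the sender's side of the signature handshake: it produces the value that validatePostUploadRequest() above recomputes and compares. The concatenation order matches the check in that method; hostname, uuid and expiry values are placeholders.

import com.cloud.utils.EncryptionUtil;

public class PostUploadSignatureSketch {
    // metadata: the encoded TemplateOrVolumePostUploadCommand; expires: ISO-8601 timestamp;
    // ssvmPsk: the shared key written to /etc/cloudstack/agent/ms-psk
    public static String sign(String metadata, String hostname, String uuid, String expires, String ssvmPsk) {
        String fullUrl = "https://" + hostname + "/upload/" + uuid;   // same URL shape the SSVM rebuilds
        return EncryptionUtil.generateSignature(metadata + fullUrl + expires, ssvmPsk);
    }
}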
|
||||
|
|
|
|||
|
|
@ -18,6 +18,7 @@ package org.apache.cloudstack.storage.template;

import java.util.Map;

import com.cloud.storage.template.Processor;
import org.apache.cloudstack.storage.command.DownloadCommand;
import org.apache.cloudstack.storage.command.DownloadCommand.ResourceType;
import org.apache.cloudstack.storage.resource.SecondaryStorageResource;
|
|
@ -52,6 +53,8 @@ public interface DownloadManager extends Manager {
    public String downloadS3Template(S3TO s3, long id, String url, String name, ImageFormat format, boolean hvm, Long accountId, String descr, String cksum,
        String installPathPrefix, String user, String password, long maxTemplateSizeInBytes, Proxy proxy, ResourceType resourceType);

    Map<String, Processor> getProcessors();

    /**
     * Get the status of a download job
     * @param jobId job Id

@ -88,7 +88,7 @@ import com.cloud.utils.script.Script;
public class DownloadManagerImpl extends ManagerBase implements DownloadManager {
    private String _name;
    StorageLayer _storage;
    Map<String, Processor> _processors;
    public Map<String, Processor> _processors;

    public class Completion implements DownloadCompleteCallback {
        private final String jobId;

@ -103,6 +103,11 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager
        }
    }

    @Override
    public Map<String, Processor> getProcessors() {
        return _processors;
    }

    private static class DownloadJob {
        private final TemplateDownloader td;
        private final String tmpltName;
|
|
@ -0,0 +1,201 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package org.apache.cloudstack.storage.template;
|
||||
|
||||
|
||||
import com.cloud.storage.Storage;
|
||||
|
||||
public class UploadEntity {
|
||||
private long downloadedsize;
|
||||
private String filename;
|
||||
private String installPathPrefix;
|
||||
private String templatePath;
|
||||
private boolean isHvm;
|
||||
private Storage.ImageFormat format;
|
||||
private String uuid;
|
||||
private long entityId;
|
||||
private String chksum;
|
||||
private long physicalSize;
|
||||
private int maxSizeInGB;
|
||||
private String description;
|
||||
private long contentLength;
|
||||
|
||||
public static enum ResourceType {
|
||||
VOLUME, TEMPLATE
|
||||
}
|
||||
|
||||
public static enum Status {
|
||||
UNKNOWN, IN_PROGRESS, COMPLETED, ERROR
|
||||
}
|
||||
|
||||
private Status uploadState;
|
||||
private String errorMessage=null;
|
||||
private ResourceType resourceType;
|
||||
private long virtualSize;
|
||||
private boolean isMetaDataPopulated;
|
||||
|
||||
public static long s_maxTemplateSize = 50L * 1024L * 1024L * 1024L;
|
||||
|
||||
public UploadEntity(String uuid, long entityId, Status status, String filename, String installPathPrefix){
|
||||
this.uuid=uuid;
|
||||
this.uploadState=status;
|
||||
this.downloadedsize=0l;
|
||||
this.filename=filename;
|
||||
this.installPathPrefix = installPathPrefix;
|
||||
this.entityId=entityId;
|
||||
}
|
||||
|
||||
public UploadEntity(){
|
||||
|
||||
}
|
||||
|
||||
public void setStatus(Status status) {
|
||||
this.uploadState = status;
|
||||
}
|
||||
|
||||
public void setErrorMessage(String errorMessage) {
|
||||
this.errorMessage=errorMessage;
|
||||
}
|
||||
|
||||
public long getDownloadedsize() {
|
||||
return downloadedsize;
|
||||
}
|
||||
|
||||
public String getErrorMessage() {
|
||||
return errorMessage;
|
||||
}
|
||||
|
||||
public Status getUploadState() {
|
||||
return uploadState;
|
||||
}
|
||||
|
||||
public void incremetByteCount(long numberOfBytes) {
|
||||
this.downloadedsize+= numberOfBytes;
|
||||
}
|
||||
|
||||
public String getFilename() {
|
||||
return filename;
|
||||
}
|
||||
|
||||
public void setFilename(String filename) {
|
||||
this.filename = filename;
|
||||
}
|
||||
public String getInstallPathPrefix() {
|
||||
return installPathPrefix;
|
||||
}
|
||||
|
||||
public void setInstallPathPrefix(String absoluteFilePath) {
|
||||
this.installPathPrefix = absoluteFilePath;
|
||||
}
|
||||
|
||||
public String getTmpltPath() {
|
||||
return templatePath;
|
||||
}
|
||||
|
||||
public void setTemplatePath(String templatePath) {
|
||||
this.templatePath=templatePath;
|
||||
}
|
||||
|
||||
public ResourceType getResourceType() {
|
||||
return resourceType;
|
||||
}
|
||||
|
||||
public void setResourceType(ResourceType resourceType) {
|
||||
this.resourceType = resourceType;
|
||||
}
|
||||
|
||||
public boolean isHvm() {
|
||||
return isHvm;
|
||||
}
|
||||
|
||||
public void setHvm(boolean isHvm) {
|
||||
this.isHvm = isHvm;
|
||||
}
|
||||
|
||||
public Storage.ImageFormat getFormat() {
|
||||
return format;
|
||||
}
|
||||
|
||||
public void setFormat(Storage.ImageFormat format) {
|
||||
this.format = format;
|
||||
}
|
||||
|
||||
public String getUuid() {
|
||||
return uuid;
|
||||
}
|
||||
|
||||
public long getEntityId() {
|
||||
return entityId;
|
||||
}
|
||||
|
||||
public String getChksum() {
|
||||
return chksum;
|
||||
}
|
||||
|
||||
public void setChksum(String chksum) {
|
||||
this.chksum = chksum;
|
||||
}
|
||||
|
||||
public long getVirtualSize() {
|
||||
return virtualSize;
|
||||
}
|
||||
|
||||
public void setVirtualSize(long virtualSize) {
|
||||
this.virtualSize = virtualSize;
|
||||
}
|
||||
|
||||
public boolean isMetaDataPopulated() {
|
||||
return isMetaDataPopulated;
|
||||
}
|
||||
|
||||
public void setMetaDataPopulated(boolean isMetaDataPopulated) {
|
||||
this.isMetaDataPopulated = isMetaDataPopulated;
|
||||
}
|
||||
|
||||
public void setPhysicalSize(long physicalSize) {
|
||||
this.physicalSize = physicalSize;
|
||||
}
|
||||
|
||||
public long getPhysicalSize() {
|
||||
return physicalSize;
|
||||
}
|
||||
|
||||
public int getMaxSizeInGB() {
|
||||
return maxSizeInGB;
|
||||
}
|
||||
|
||||
public void setMaxSizeInGB(int maxSizeInGB) {
|
||||
this.maxSizeInGB = maxSizeInGB;
|
||||
}
|
||||
|
||||
public String getDescription() {
|
||||
return description;
|
||||
}
|
||||
|
||||
public void setDescription(String description) {
|
||||
this.description = description;
|
||||
}
|
||||
|
||||
public long getContentLength() {
|
||||
return contentLength;
|
||||
}
|
||||
|
||||
public void setContentLength(long contentLength) {
|
||||
this.contentLength = contentLength;
|
||||
}
|
||||
}
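For orientation, a short sketch (not part of the patch) of the lifecycle NfsSecondaryStorageResource drives for an UploadEntity; all values are placeholders.

import org.apache.cloudstack.storage.template.UploadEntity;

public class UploadEntityLifecycleSketch {
    public static void main(String[] args) {
        // created by createUploadEntity() once the metadata has been validated
        UploadEntity entity = new UploadEntity("a-placeholder-uuid", 42L,
                UploadEntity.Status.IN_PROGRESS, "disk.qcow2", "/mnt/SecStorage/<mount>/volumes/2/42");
        entity.setContentLength(5L * 1024 * 1024 * 1024);   // size announced by the client's Content-Length header
        // ... chunks arrive via HttpUploadServerHandler, then postUpload() runs the install script ...
        entity.setStatus(UploadEntity.Status.COMPLETED);     // or Status.ERROR plus setErrorMessage(...) on failure
    }
}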
|
||||
|
|
@ -1174,6 +1174,36 @@ setup_secstorage() {
  fi
  setup_apache2 $ETH2_IP

  log_it "setting up apache2 for post upload of volume/template"
  a2enmod proxy
  a2enmod proxy_http
  a2enmod headers

  SSL_FILE="/etc/apache2/sites-available/default-ssl"
  PATTERN="RewriteRule ^\/upload\/(.*)"
  CORS_PATTERN="Header set Access-Control-Allow-Origin"
  if [ -f $SSL_FILE ]; then
    if grep -q "$PATTERN" $SSL_FILE ; then
      log_it "rewrite rules already exist in file $SSL_FILE"
    else
      log_it "adding rewrite rules to file: $SSL_FILE"
      sed -i -e "s/<\/VirtualHost>/RewriteEngine On \n&/" $SSL_FILE
      sed -i -e "s/<\/VirtualHost>/RewriteCond %{HTTPS} =on \n&/" $SSL_FILE
      sed -i -e "s/<\/VirtualHost>/RewriteCond %{REQUEST_METHOD} =POST \n&/" $SSL_FILE
      sed -i -e "s/<\/VirtualHost>/RewriteRule ^\/upload\/(.*) http:\/\/127.0.0.1:8210\/upload?uuid=\$1 [P,L] \n&/" $SSL_FILE
    fi
    if grep -q "$CORS_PATTERN" $SSL_FILE ; then
      log_it "cors rules already exist in file $SSL_FILE"
    else
      log_it "adding cors rules to file: $SSL_FILE"
      sed -i -e "s/<\/VirtualHost>/Header always set Access-Control-Allow-Origin \"*\" \n&/" $SSL_FILE
      sed -i -e "s/<\/VirtualHost>/Header always set Access-Control-Allow-Methods \"POST, OPTIONS\" \n&/" $SSL_FILE
      sed -i -e "s/<\/VirtualHost>/Header always set Access-Control-Allow-Headers \"x-requested-with, Content-Type, origin, authorization, accept, client-security-token, x-signature, x-metadata, x-expires\" \n&/" $SSL_FILE
    fi
  fi

  service apache2 restart

  disable_rpfilter
  enable_fwding 0
  enable_svc haproxy 0
|
|
@ -60,6 +60,30 @@ config_apache2_conf() {
  then
    sed -i -e "s/#SSLCertificateChainFile.*/SSLCertificateChainFile \/etc\/ssl\/certs\/cert_apache_chain.crt/" /etc/apache2/sites-available/default-ssl
  fi

  SSL_FILE="/etc/apache2/sites-available/default-ssl"
  PATTERN="RewriteRule ^\/upload\/(.*)"
  CORS_PATTERN="Header set Access-Control-Allow-Origin"
  if [ -f $SSL_FILE ]; then
    if grep -q "$PATTERN" $SSL_FILE ; then
      echo "rewrite rules already exist in file $SSL_FILE"
    else
      echo "adding rewrite rules to file: $SSL_FILE"
      sed -i -e "s/<\/VirtualHost>/RewriteEngine On \n&/" $SSL_FILE
      sed -i -e "s/<\/VirtualHost>/RewriteCond %{HTTPS} =on \n&/" $SSL_FILE
      sed -i -e "s/<\/VirtualHost>/RewriteCond %{REQUEST_METHOD} =POST \n&/" $SSL_FILE
      sed -i -e "s/<\/VirtualHost>/RewriteRule ^\/upload\/(.*) http:\/\/127.0.0.1:8210\/upload?uuid=\$1 [P,L] \n&/" $SSL_FILE
    fi
    if grep -q "$CORS_PATTERN" $SSL_FILE ; then
      echo "cors rules already exist in file $SSL_FILE"
    else
      echo "adding cors rules to file: $SSL_FILE"
      sed -i -e "s/<\/VirtualHost>/Header always set Access-Control-Allow-Origin \"*\" \n&/" $SSL_FILE
      sed -i -e "s/<\/VirtualHost>/Header always set Access-Control-Allow-Methods \"POST, OPTIONS\" \n&/" $SSL_FILE
      sed -i -e "s/<\/VirtualHost>/Header always set Access-Control-Allow-Headers \"x-requested-with, Content-Type, origin, authorization, accept, client-security-token, x-signature, x-metadata, x-expires\" \n&/" $SSL_FILE
    fi
  fi

}

copy_certs() {
|
|||
File diff suppressed because it is too large
File diff suppressed because it is too large
|
|
@ -64,11 +64,6 @@
      <artifactId>selenium-java-client-driver</artifactId>
      <version>1.0.1</version>
    </dependency>
    <dependency>
      <groupId>commons-httpclient</groupId>
      <artifactId>commons-httpclient</artifactId>
      <version>${cs.commons-httpclient.version}</version>
    </dependency>
  </dependencies>
  <build>
    <defaultGoal>compile</defaultGoal>
|
|
@ -224,23 +224,23 @@ test_data = {
|
|||
},
|
||||
},
|
||||
"nw_off_isolated_netscaler": {
|
||||
"name": 'Netscaler',
|
||||
"displaytext": 'Netscaler',
|
||||
"guestiptype": 'Isolated',
|
||||
"supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Firewall,Lb,UserData,StaticNat',
|
||||
"traffictype": 'GUEST',
|
||||
"availability": 'Optional',
|
||||
"serviceProviderList": {
|
||||
"Dhcp": 'VirtualRouter',
|
||||
"Dns": 'VirtualRouter',
|
||||
"SourceNat": 'VirtualRouter',
|
||||
"PortForwarding": 'VirtualRouter',
|
||||
"Vpn": 'VirtualRouter',
|
||||
"Firewall": 'VirtualRouter',
|
||||
"Lb": 'Netscaler',
|
||||
"UserData": 'VirtualRouter',
|
||||
"StaticNat": 'VirtualRouter',
|
||||
},
|
||||
"name": 'Netscaler',
|
||||
"displaytext": 'Netscaler',
|
||||
"guestiptype": 'Isolated',
|
||||
"supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Firewall,Lb,UserData,StaticNat',
|
||||
"traffictype": 'GUEST',
|
||||
"availability": 'Optional',
|
||||
"serviceProviderList": {
|
||||
"Dhcp": 'VirtualRouter',
|
||||
"Dns": 'VirtualRouter',
|
||||
"SourceNat": 'VirtualRouter',
|
||||
"PortForwarding": 'VirtualRouter',
|
||||
"Vpn": 'VirtualRouter',
|
||||
"Firewall": 'VirtualRouter',
|
||||
"Lb": 'Netscaler',
|
||||
"UserData": 'VirtualRouter',
|
||||
"StaticNat": 'VirtualRouter',
|
||||
},
|
||||
},
|
||||
"nw_off_isolated_persistent": {
|
||||
"name": 'Test Nw off isolated persistent',
|
||||
|
|
|
|||
|
|
@ -68,7 +68,7 @@
|
|||
actions: {
|
||||
// Add volume
|
||||
add: {
|
||||
label: 'label.add.volume',
|
||||
label: 'Add',
|
||||
|
||||
preFilter: function(args) {
|
||||
return !args.context.instances;
|
||||
|
|
@ -256,18 +256,25 @@
|
|||
|
||||
uploadVolume: {
|
||||
isHeader: true,
|
||||
label: 'label.upload.volume',
|
||||
label: 'Upload',
|
||||
preFilter: function(args) {
|
||||
return !args.context.instances;
|
||||
},
|
||||
messages: {
|
||||
notification: function() {
|
||||
return 'label.upload.volume';
|
||||
return 'Upload Volume from URL';
|
||||
}
|
||||
},
|
||||
createForm: {
|
||||
title: 'label.upload.volume',
|
||||
title: 'Upload Volume from URL',
|
||||
fields: {
|
||||
url: {
|
||||
label: 'label.url',
|
||||
docID: 'helpUploadVolumeURL',
|
||||
validation: {
|
||||
required: true
|
||||
}
|
||||
},
|
||||
name: {
|
||||
label: 'label.name',
|
||||
validation: {
|
||||
|
|
@ -323,14 +330,37 @@
|
|||
});
|
||||
}
|
||||
|
||||
},
|
||||
url: {
|
||||
label: 'label.url',
|
||||
docID: 'helpUploadVolumeURL',
|
||||
validation: {
|
||||
required: true
|
||||
},
|
||||
diskOffering: {
|
||||
label: 'Custom Disk Offering',
|
||||
docID: 'helpVolumeDiskOffering',
|
||||
select: function(args) {
|
||||
var diskofferingObjs;
|
||||
$.ajax({
|
||||
url: createURL("listDiskOfferings"),
|
||||
dataType: "json",
|
||||
async: false,
|
||||
success: function(json) {
|
||||
diskofferingObjs = json.listdiskofferingsresponse.diskoffering;
|
||||
var items = [{
|
||||
id: '',
|
||||
description: ''
|
||||
}];
|
||||
$(diskofferingObjs).each(function() {
|
||||
if (this.iscustomized == true) {
|
||||
items.push({
|
||||
id: this.id,
|
||||
description: this.displaytext
|
||||
});
|
||||
}
|
||||
});
|
||||
args.response.success({
|
||||
data: items
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
},
|
||||
},
|
||||
diskOffering: {
|
||||
label: 'Custom Disk Offering',
|
||||
docID: 'helpVolumeDiskOffering',
|
||||
|
|
@ -414,7 +444,147 @@
|
|||
notification: {
|
||||
poll: pollAsyncJobResult
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
uploadVolumefromLocal: {
|
||||
isHeader: true,
|
||||
label: 'Upload from Local',
|
||||
preFilter: function(args) {
|
||||
return !args.context.instances;
|
||||
},
|
||||
messages: {
|
||||
notification: function() {
|
||||
return 'Upload Volume from Local';
|
||||
}
|
||||
},
|
||||
createForm: {
|
||||
title: 'Upload Volume from Local',
|
||||
fileUpload: {
|
||||
getURL: function(args) {
|
||||
args.data = args.formData;
|
||||
|
||||
var data = {
|
||||
name: args.data.name,
|
||||
zoneId: args.data.availabilityZone,
|
||||
format: args.data.format,
|
||||
url: args.data.url
|
||||
};
|
||||
|
||||
if (args.data.checksum != null && args.data.checksum.length > 0) {
|
||||
$.extend(data, {
|
||||
checksum: args.data.checksum
|
||||
});
|
||||
}
|
||||
|
||||
$.ajax({
|
||||
url: createURL('getUploadParamsForVolume'),
|
||||
data: data,
|
||||
async: false,
|
||||
success: function(json) {
|
||||
var uploadparams = json.postuploadvolumeresponse.getuploadparams; //json.postuploadvolumeresponse.getuploadparams is an object, not an array of objects.
|
||||
var volumeId = uploadparams.id;
|
||||
|
||||
args.response.success({
|
||||
url: uploadparams.postURL,
|
||||
ajaxPost: true,
|
||||
data: {
|
||||
'X-signature': uploadparams.signature,
|
||||
'X-expires': uploadparams.expires,
|
||||
'X-metadata': uploadparams.metadata
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
},
|
||||
postUpload: function(args) {
|
||||
if(args.error) {
|
||||
args.response.error(args.errorMsg);
|
||||
} else {
|
||||
cloudStack.dialog.notice({
|
||||
message: "This volume file has been uploaded. Please check its status at Stroage menu > Volumes > " + args.data.name + " > Status field."
|
||||
});
|
||||
args.response.success();
|
||||
}
|
||||
}
|
||||
},
|
||||
fields: {
|
||||
volumeFileUpload: {
|
||||
label: 'local file',
|
||||
isFileUpload: true,
|
||||
validation: {
|
||||
required: true
|
||||
}
|
||||
},
|
||||
name: {
|
||||
label: 'label.name',
|
||||
validation: {
|
||||
required: true
|
||||
},
|
||||
docID: 'helpUploadVolumeName'
|
||||
},
|
||||
availabilityZone: {
|
||||
label: 'label.availability.zone',
|
||||
docID: 'helpUploadVolumeZone',
|
||||
select: function(args) {
|
||||
$.ajax({
|
||||
url: createURL("listZones&available=true"),
|
||||
dataType: "json",
|
||||
async: true,
|
||||
success: function(json) {
|
||||
var zoneObjs = json.listzonesresponse.zone;
|
||||
args.response.success({
|
||||
descriptionField: 'name',
|
||||
data: zoneObjs
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
},
|
||||
format: {
|
||||
label: 'label.format',
|
||||
docID: 'helpUploadVolumeFormat',
|
||||
select: function(args) {
|
||||
var items = [];
|
||||
items.push({
|
||||
id: 'RAW',
|
||||
description: 'RAW'
|
||||
});
|
||||
items.push({
|
||||
id: 'VHD',
|
||||
description: 'VHD'
|
||||
});
|
||||
items.push({
|
||||
id: 'VHDX',
|
||||
description: 'VHDX'
|
||||
});
|
||||
items.push({
|
||||
id: 'OVA',
|
||||
description: 'OVA'
|
||||
});
|
||||
items.push({
|
||||
id: 'QCOW2',
|
||||
description: 'QCOW2'
|
||||
});
|
||||
args.response.success({
|
||||
data: items
|
||||
});
|
||||
}
|
||||
},
|
||||
checksum: {
|
||||
docID: 'helpUploadVolumeChecksum',
|
||||
label: 'label.md5.checksum'
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
action: function(args) {
|
||||
return; //createForm.fileUpload.getURL() has executed the whole action. Therefore, nothing needs to be done here.
|
||||
},
|
||||
|
||||
notification: {
|
||||
poll: pollAsyncJobResult
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
advSearchFields: {
|
||||
|
|
|
|||
|
|
@ -100,17 +100,24 @@
|
|||
reorder: cloudStack.api.actions.sort('updateTemplate', 'templates'),
|
||||
actions: {
|
||||
add: {
|
||||
label: 'label.action.register.template',
|
||||
label: 'Add',
|
||||
messages: {
|
||||
notification: function(args) {
|
||||
return 'label.action.register.template';
|
||||
return 'Register Template from URL';
|
||||
}
|
||||
},
|
||||
createForm: {
|
||||
title: 'label.action.register.template',
|
||||
title: 'Register Template from URL',
|
||||
docID: 'helpNetworkOfferingName',
|
||||
preFilter: cloudStack.preFilter.createTemplate,
|
||||
fields: {
|
||||
url: {
|
||||
label: 'label.url',
|
||||
docID: 'helpRegisterTemplateURL',
|
||||
validation: {
|
||||
required: true
|
||||
}
|
||||
},
|
||||
name: {
|
||||
label: 'label.name',
|
||||
docID: 'helpRegisterTemplateName',
|
||||
|
|
@ -124,14 +131,7 @@
|
|||
validation: {
|
||||
required: true
|
||||
}
|
||||
},
|
||||
url: {
|
||||
label: 'label.url',
|
||||
docID: 'helpRegisterTemplateURL',
|
||||
validation: {
|
||||
required: true
|
||||
}
|
||||
},
|
||||
},
|
||||
zone: {
|
||||
label: 'label.zone',
|
||||
docID: 'helpRegisterTemplateZone',
|
||||
|
|
@ -573,6 +573,311 @@
|
|||
        });
    },

    notification: {
        poll: function(args) {
            args.complete();
        }
    }
},

uploadTemplateFromLocal: {
    isHeader: true,
    label: 'Upload from Local',
    messages: {
        notification: function(args) {
            return 'Upload Template from Local';
        }
    },
    createForm: {
        title: 'Upload Template from Local',
        preFilter: cloudStack.preFilter.createTemplate,
        fileUpload: {
            getURL: function(args) {
                args.data = args.formData;

                var data = {
                    name: args.data.name,
                    displayText: args.data.description,
                    zoneid: args.data.zone,
                    format: args.data.format,
                    isextractable: (args.data.isExtractable == "on"),
                    passwordEnabled: (args.data.isPasswordEnabled == "on"),
                    isdynamicallyscalable: (args.data.isdynamicallyscalable == "on"),
                    osTypeId: args.data.osTypeId,
                    hypervisor: args.data.hypervisor
                };

                if (args.$form.find('.form-item[rel=isPublic]').css("display") != "none") {
                    $.extend(data, {
                        ispublic: (args.data.isPublic == "on")
                    });
                }

                if (args.$form.find('.form-item[rel=requireshvm]').css("display") != "none") {
                    $.extend(data, {
                        requireshvm: (args.data.requireshvm == "on")
                    });
                }

                if (args.$form.find('.form-item[rel=isFeatured]').css("display") != "none") {
                    $.extend(data, {
                        isfeatured: (args.data.isFeatured == "on")
                    });
                }

                if (args.$form.find('.form-item[rel=isrouting]').is(':visible')) {
                    $.extend(data, {
                        isrouting: (args.data.isrouting === 'on')
                    });
                }

                $.ajax({
                    url: createURL('getUploadParamsForTemplate'),
                    data: data,
                    async: false,
                    success: function(json) {
                        var uploadparams = json.postuploadtemplateresponse.getuploadparams;
                        var templateId = uploadparams.id;

                        args.response.success({
                            url: uploadparams.postURL,
                            ajaxPost: true,
                            data: {
                                'X-signature': uploadparams.signature,
                                'X-expires': uploadparams.expires,
                                'X-metadata': uploadparams.metadata
                            }
                        });
                    }
                });
            },
            postUpload: function(args) {
                if(args.error) {
                    args.response.error(args.errorMsg);
                } else {
                    cloudStack.dialog.notice({
                        message: "This template file has been uploaded. Please check its status at Templates menu > " + args.data.name + " > Zones tab > click a zone > Status field and Ready field."
                    });
                    args.response.success();
                }
            }
        },
        fields: {
            templateFileUpload: {
                label: 'local file',
                isFileUpload: true,
                validation: {
                    required: true
                }
            },

            name: {
                label: 'label.name',
                docID: 'helpRegisterTemplateName',
                validation: {
                    required: true
                }
            },

            description: {
                label: 'label.description',
                docID: 'helpRegisterTemplateDescription',
                validation: {
                    required: true
                }
            },

            zone: {
                label: 'label.zone',
                docID: 'helpRegisterTemplateZone',
                select: function(args) {
                    $.ajax({
                        url: createURL("listZones&available=true"),
                        dataType: "json",
                        async: true,
                        success: function(json) {
                            var zoneObjs = json.listzonesresponse.zone;
                            args.response.success({
                                descriptionField: 'name',
                                data: zoneObjs
                            });
                        }
                    });
                }
            },

            hypervisor: {
                label: 'label.hypervisor',
                docID: 'helpRegisterTemplateHypervisor',
                dependsOn: 'zone',
                select: function(args) {
                    if (args.zone == null)
                        return;

                    var apiCmd;
                    if (args.zone == -1) { //All Zones
                        //apiCmd = "listHypervisors&zoneid=-1"; //"listHypervisors&zoneid=-1" has been changed to return only hypervisors available in all zones (bug 8809)
                        apiCmd = "listHypervisors";
                    } else {
                        apiCmd = "listHypervisors&zoneid=" + args.zone;
                    }

                    $.ajax({
                        url: createURL(apiCmd),
                        dataType: "json",
                        async: false,
                        success: function(json) {
                            var hypervisorObjs = json.listhypervisorsresponse.hypervisor;
                            var items = [];
                            $(hypervisorObjs).each(function() {
                                items.push({
                                    id: this.name,
                                    description: this.name
                                });
                            });
                            args.response.success({
                                data: items
                            });
                        }
                    });
                }
            },

            format: {
                label: 'label.format',
                docID: 'helpRegisterTemplateFormat',
                dependsOn: 'hypervisor',
                select: function(args) {
                    var items = [];
                    if (args.hypervisor == "XenServer") {
                        items.push({
                            id: 'VHD',
                            description: 'VHD'
                        });
                    } else if (args.hypervisor == "VMware") {
                        items.push({
                            id: 'OVA',
                            description: 'OVA'
                        });
                    } else if (args.hypervisor == "KVM") {
                        items.push({
                            id: 'QCOW2',
                            description: 'QCOW2'
                        });
                        items.push({
                            id: 'RAW',
                            description: 'RAW'
                        });
                        items.push({
                            id: 'VHD',
                            description: 'VHD'
                        });
                        items.push({
                            id: 'VMDK',
                            description: 'VMDK'
                        });
                    } else if (args.hypervisor == "BareMetal") {
                        items.push({
                            id: 'BareMetal',
                            description: 'BareMetal'
                        });
                    } else if (args.hypervisor == "Ovm") {
                        items.push({
                            id: 'RAW',
                            description: 'RAW'
                        });
                    } else if (args.hypervisor == "LXC") {
                        items.push({
                            id: 'TAR',
                            description: 'TAR'
                        });
                    } else if (args.hypervisor == "Hyperv") {
                        items.push({
                            id: 'VHD',
                            description: 'VHD'
                        });
                        items.push({
                            id: 'VHDX',
                            description: 'VHDX'
                        });
                    }
                    args.response.success({
                        data: items
                    });
                }
            },

            osTypeId: {
                label: 'label.os.type',
                docID: 'helpRegisterTemplateOSType',
                select: function(args) {
                    $.ajax({
                        url: createURL("listOsTypes"),
                        dataType: "json",
                        async: true,
                        success: function(json) {
                            var ostypeObjs = json.listostypesresponse.ostype;
                            args.response.success({
                                data: ostypeObjs
                            });
                        }
                    });
                }
            },

            isExtractable: {
                label: "extractable",
                docID: 'helpRegisterTemplateExtractable',
                isBoolean: true
            },

            isPasswordEnabled: {
                label: "label.password.enabled",
                docID: 'helpRegisterTemplatePasswordEnabled',
                isBoolean: true
            },

            isdynamicallyscalable: {
                label: "label.dynamically.scalable",
                docID: 'helpRegisterTemplateDynamicallyScalable',
                isBoolean: true
            },

            isPublic: {
                label: "label.public",
                docID: 'helpRegisterTemplatePublic',
                isBoolean: true,
                isHidden: true
            },

            isFeatured: {
                label: "label.featured",
                docID: 'helpRegisterTemplateFeatured',
                isBoolean: true,
                isHidden: true
            },

            isrouting: {
                label: 'label.routing',
                docID: 'helpRegisterTemplateRouting',
                isBoolean: true,
                isHidden: true
            },

            requireshvm: {
                label: 'label.hvm',
                docID: 'helpRegisterTemplateHvm',
                isBoolean: true,
                isHidden: false,
                isChecked: true
            }
        }
    },

    action: function(args) {
        return; //createForm.fileUpload.getURL() has executed the whole action. Therefore, nothing needs to be done here.
    },

    notification: {
        poll: function(args) {
            args.complete();
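For readers following the flow above: the UI first calls getUploadParamsForTemplate, then POSTs the selected file directly to the returned postURL with the X-signature, X-expires and X-metadata headers. The Java sketch below reproduces that second step outside the browser; it is illustrative only (the class name, the use of HttpURLConnection, and the octet-stream content type are assumptions, not part of this change) and it presumes you already hold the values returned by getUploadParamsForTemplate. The form field name "file" mirrors the FormData field used by the UI.

import java.io.DataOutputStream;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;

public class TemplateUploadSketch {
    // Illustrative client for the SSVM post-upload endpoint; not CloudStack code.
    // postUrl, signature, expires and metadata are assumed to come from a prior
    // getUploadParamsForTemplate call.
    public static int upload(String postUrl, String signature, String expires,
                             String metadata, Path template) throws IOException {
        String boundary = "----cloudstack-upload-" + System.nanoTime();
        HttpURLConnection conn = (HttpURLConnection) new URL(postUrl).openConnection();
        conn.setRequestMethod("POST");
        conn.setDoOutput(true);
        // Same headers the UI attaches to its ajax POST.
        conn.setRequestProperty("X-signature", signature);
        conn.setRequestProperty("X-expires", expires);
        conn.setRequestProperty("X-metadata", metadata);
        conn.setRequestProperty("Content-Type", "multipart/form-data; boundary=" + boundary);

        try (DataOutputStream out = new DataOutputStream(conn.getOutputStream())) {
            // Single multipart part named "file", streamed from disk.
            out.writeBytes("--" + boundary + "\r\n");
            out.writeBytes("Content-Disposition: form-data; name=\"file\"; filename=\""
                    + template.getFileName() + "\"\r\n");
            out.writeBytes("Content-Type: application/octet-stream\r\n\r\n");
            Files.copy(template, out);
            out.writeBytes("\r\n--" + boundary + "--\r\n");
        }
        return conn.getResponseCode();
    }
}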
@@ -474,6 +474,16 @@
    if (field.defaultValue) {
        $input.val(strOrFunc(field.defaultValue));
    }
} else if (field.isFileUpload) {
    $input = $('<input>').attr({
        type: 'file',
        name: 'files[]'
    }).appendTo($value);

    // Add events
    $input.change(function(event) {
        $form.data('files', event.target.files);
    });
} else if (field.isTokenInput) { // jquery.tokeninput.js
    isAsync = true;
@@ -673,12 +683,131 @@
            }
        }

        args.after({
            data: data,
            ref: args.ref, // For backwards compatibility; use context
            context: args.context,
            $form: $form
        });
        var uploadFiles = function() {
            $form.prepend($('<div>').addClass('loading-overlay'));
            args.form.fileUpload.getURL({
                $form: $form,
                formData: data,
                context: args.context,
                response: {
                    success: function(successArgs) {
                        var $file = $form.find('input[type=file]');
                        var postUploadArgs = {
                            $form: $form,
                            data: data,
                            context: args.context,
                            response: {
                                success: function() {
                                    args.after({
                                        data: data,
                                        ref: args.ref, // For backwards compatibility; use context
                                        context: args.context,
                                        $form: $form
                                    });

                                    $('div.overlay').remove();
                                    $form.find('.loading-overlay').remove();
                                    $('div.loading-overlay').remove();

                                    $('.tooltip-box').remove();
                                    $formContainer.remove();
                                    $(this).dialog('destroy');

                                    $('.hovered-elem').hide();
                                },
                                error: function(msg) {
                                    $('div.overlay').remove();
                                    $form.find('.loading-overlay').remove();
                                    $('div.loading-overlay').remove();

                                    cloudStack.dialog.error({ message: msg });
                                }
                            }
                        };
                        var postUploadArgsWithStatus = $.extend(true, {}, postUploadArgs);

                        if(successArgs.ajaxPost) {
                            var request = new FormData();
                            request.append('file', $file.prop("files")[0]);
                            $.ajax({
                                type: 'POST',
                                url: successArgs.url,
                                data: request,
                                dataType : 'html',
                                processData: false,
                                contentType: false,
                                headers: successArgs.data,
                                success: function(r) {
                                    postUploadArgsWithStatus.error = false;
                                    args.form.fileUpload.postUpload(postUploadArgsWithStatus);
                                },
                                error: function(r) {
                                    postUploadArgsWithStatus.error = true;
                                    postUploadArgsWithStatus.errorMsg = r.responseText;
                                    args.form.fileUpload.postUpload(postUploadArgsWithStatus);
                                }
                            });
                        } else {
                            //
                            // Move file field into iframe; keep visible for consistency
                            //
                            var $uploadFrame = $('<iframe>');
                            var $frameForm = $('<form>').attr({
                                method: 'POST',
                                action: successArgs.url,
                                enctype: 'multipart/form-data'
                            });
                            var $field = $file.closest('.form-item .value');

                            // Add additional passed data
                            $.map(successArgs.data, function(v, k) {
                                var $hidden = $('<input>').attr({
                                    type: 'hidden',
                                    name: k,
                                    value: v
                                });

                                $hidden.appendTo($frameForm);
                            });

                            console.log("The following object is a hidden HTML form that will submit local file with hidden field signature/expires/metadata:");
                            console.log($frameForm);

                            $uploadFrame.css({ width: $field.outerWidth(), height: $field.height() }).show();
                            $frameForm.append($file);
                            $field.append($uploadFrame);
                            $uploadFrame.contents().find('html body').append($frameForm);
                            $frameForm.submit(function() {
                                console.log("callback() in $frameForm.submit(callback(){}) is triggered");
                                $uploadFrame.load(function() {
                                    console.log("callback() in $uploadFrame.load(callback(){}) is triggered");
                                    args.form.fileUpload.postUpload(postUploadArgs);
                                });
                                return true;
                            });
                            $frameForm.submit();
                        }
                    },
                    error: function(msg) {
                        cloudStack.dialog.error({ message: msg });
                    }
                }
            });
        };

        if ($form.data('files')) {
            uploadFiles();

            return false;
        } else {
            args.after({
                data: data,
                ref: args.ref, // For backwards compatibility; use context
                context: args.context,
                $form: $form
            });
        }

        return true;
    };
@@ -118,6 +118,7 @@
  <groupId>commons-io</groupId>
  <artifactId>commons-io</artifactId>
  <scope>provided</scope>
  <version>${cs.commons-io.version}</version>
</dependency>
<dependency>
  <groupId>org.reflections</groupId>
@@ -186,19 +187,6 @@
      </excludes>
    </configuration>
  </plugin>
  <plugin>
    <groupId>com.mycila</groupId>
    <artifactId>license-maven-plugin</artifactId>
    <executions>
      <execution>
        <id>cloudstack-checklicence</id>
        <phase>process-classes</phase>
        <goals>
          <goal>check</goal>
        </goals>
      </execution>
    </executions>
  </plugin>
</plugins>
<resources>
  <resource>
@@ -0,0 +1,69 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package com.cloud.utils;

import org.apache.commons.codec.binary.Base64;
import org.apache.log4j.Logger;
import org.jasypt.encryption.pbe.PBEStringEncryptor;
import org.jasypt.encryption.pbe.StandardPBEStringEncryptor;

import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;

public class EncryptionUtil {
    public static final Logger s_logger = Logger.getLogger(EncryptionUtil.class.getName());
    private static PBEStringEncryptor encryptor;

    private static void initialize(String key) {
        StandardPBEStringEncryptor standardPBEStringEncryptor = new StandardPBEStringEncryptor();
        standardPBEStringEncryptor.setAlgorithm("PBEWITHSHA1ANDDESEDE");
        standardPBEStringEncryptor.setPassword(key);
        encryptor = standardPBEStringEncryptor;
    }

    public static String encodeData(String data, String key) {
        if (encryptor == null) {
            initialize(key);
        }
        return encryptor.encrypt(data);
    }

    public static String decodeData(String encodedData, String key) {
        if (encryptor == null) {
            initialize(key);
        }
        return encryptor.decrypt(encodedData);
    }

    public static String generateSignature(String data, String key) {
        try {
            final Mac mac = Mac.getInstance("HmacSHA1");
            final SecretKeySpec keySpec = new SecretKeySpec(key.getBytes(), "HmacSHA1");
            mac.init(keySpec);
            mac.update(data.getBytes());
            final byte[] encryptedBytes = mac.doFinal();
            return Base64.encodeBase64String(encryptedBytes);
        } catch (NoSuchAlgorithmException | InvalidKeyException e) {
            s_logger.error("exception occurred while encoding the data.", e);
            return null;
        }
    }
}
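generateSignature above is what produces the X-signature value that the UI forwards to the SSVM. A minimal usage sketch follows; the payload and key are placeholders chosen for illustration and the example class is not part of this change.

package com.cloud.utils;

// Illustrative only: the payload and key are placeholders, not values CloudStack itself uses.
public class EncryptionUtilExample {
    public static void main(String[] args) {
        String metadata = "{\"templateId\":\"1234\",\"zoneId\":\"1\"}"; // hypothetical payload
        String key = "post-upload-shared-key";                          // hypothetical pre-shared key

        // Sender: HMAC-SHA1 over the payload, Base64-encoded.
        String signature = EncryptionUtil.generateSignature(metadata, key);

        // Receiver: recompute the signature over the received payload and compare.
        boolean valid = signature != null
                && signature.equals(EncryptionUtil.generateSignature(metadata, key));
        System.out.println("signature=" + signature + " valid=" + valid);
    }
}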
@@ -1,30 +1,43 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//

package org.apache.cloudstack.utils.template;

import org.apache.log4j.Logger;
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.cloudstack.utils.imagestore;

import com.cloud.utils.script.Script;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;

public class TemplateUtils {
    public static final Logger s_logger = Logger.getLogger(TemplateUtils.class.getName());
public class ImageStoreUtil {
    public static final Logger s_logger = Logger.getLogger(ImageStoreUtil.class.getName());

    public static String generatePostUploadUrl(String ssvmUrlDomain, String ipAddress, String uuid) {
        String hostname = ipAddress;

        //if ssvm url domain is present, use it to construct hostname in the format 1-2-3-4.domain
        // if the domain name is not present, ssl validation fails and has to be ignored
        if(StringUtils.isNotBlank(ssvmUrlDomain)) {
            hostname = ipAddress.replace(".", "-");
            hostname = hostname + ssvmUrlDomain.substring(1);
        }

        //only https works with postupload and url format is fixed
        return "https://" + hostname + "/upload/" + uuid;
    }

    // given a path, returns empty if path is supported image, and the file type if unsupported
    // this is meant to catch things like accidental upload of ASCII text .vmdk descriptor
@@ -75,7 +88,7 @@ public class TemplateUtils
        return output;
    }

    public static boolean isCorrectExtension(String path, String ext) {
    private static boolean isCorrectExtension(String path, String ext) {
        if (path.toLowerCase().endsWith(ext)
                || path.toLowerCase().endsWith(ext + ".gz")
                || path.toLowerCase().endsWith(ext + ".bz2")
@@ -85,7 +98,7 @@ public class TemplateUtils
        return false;
    }

    public static boolean isCompressedExtension(String path) {
    private static boolean isCompressedExtension(String path) {
        if (path.toLowerCase().endsWith(".gz")
                || path.toLowerCase().endsWith(".bz2")
                || path.toLowerCase().endsWith(".zip")) {
@@ -0,0 +1,38 @@
package org.apache.cloudstack.utils.imagestore;

import java.net.MalformedURLException;
import java.net.URL;
import java.util.UUID;

import org.junit.Assert;
import org.junit.Test;

public class ImageStoreUtilTest {

    @Test
    public void testgeneratePostUploadUrl() throws MalformedURLException {
        String ssvmdomain = "*.realhostip.com";
        String ipAddress = "10.147.28.14";
        String uuid = UUID.randomUUID().toString();

        //ssvm domain is not set
        String url = ImageStoreUtil.generatePostUploadUrl(null, ipAddress, uuid);
        assertPostUploadUrl(url, ipAddress, uuid);

        //ssvm domain is set to empty value
        url = ImageStoreUtil.generatePostUploadUrl("", ipAddress, uuid);
        assertPostUploadUrl(url, ipAddress, uuid);

        //ssvm domain is set to a valid value
        url = ImageStoreUtil.generatePostUploadUrl(ssvmdomain, ipAddress, uuid);
        assertPostUploadUrl(url, ipAddress.replace(".", "-") + ssvmdomain.substring(1), uuid);
    }

    private void assertPostUploadUrl(String urlStr, String domain, String uuid) throws MalformedURLException {
        URL url = new URL(urlStr);
        Assert.assertNotNull(url);
        Assert.assertEquals(url.getHost(), domain);
        Assert.assertEquals(url.getPath(), "/upload/" + uuid);
    }

}