Removal of S3SoapService from solution

JohnZ 2012-04-28 13:35:32 +01:00 committed by Salvatore Orlando
parent 75581130d7
commit f05e3dd456
23 changed files with 201 additions and 668 deletions

View File

@ -13,7 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloud.bridge.service;
package com.cloud.bridge.io;
import java.io.File;
import java.io.FileOutputStream;
@ -29,13 +29,13 @@ import javax.activation.DataSource;
import org.apache.log4j.Logger;
import com.cloud.bridge.io.FileRangeDataSource;
import com.cloud.bridge.service.core.s3.S3BucketAdapter;
import com.cloud.bridge.service.core.s3.S3MultipartPart;
import com.cloud.bridge.service.exception.FileNotExistException;
import com.cloud.bridge.service.exception.InternalErrorException;
import com.cloud.bridge.service.exception.OutOfStorageException;
import com.cloud.bridge.util.StringHelper;
import com.cloud.bridge.util.Tuple;
import com.cloud.bridge.util.OrderedPair;
/**
* @author Kelven Yang, John Zucker
@ -136,10 +136,10 @@ public class S3FileSystemBucketAdapter implements S3BucketAdapter {
* @param sourceBucket - special bucket used to save uploaded file parts
* @param parts - an array of file names in the sourceBucket
* @param client - if not null, then keep the servlet connection alive while this potentially long concatenation takes place
* @return Tuple with the first value the MD5 of the final object, and the second value the length of the final object
* @return OrderedPair with the first value the MD5 of the final object, and the second value the length of the final object
*/
@Override
public Tuple<String,Long> concatentateObjects(String mountedRoot, String destBucket, String fileName, String sourceBucket, S3MultipartPart[] parts, OutputStream client)
public OrderedPair<String,Long> concatentateObjects(String mountedRoot, String destBucket, String fileName, String sourceBucket, S3MultipartPart[] parts, OutputStream client)
{
MessageDigest md5;
long totalLength = 0;
@ -181,8 +181,8 @@ public class S3FileSystemBucketAdapter implements S3BucketAdapter {
}
}
fos.close();
return new Tuple<String, Long>(StringHelper.toHexString(md5.digest()), new Long(totalLength));
//Create a tuple whose first element is the MD5 digest as a (lowercase) hex String
return new OrderedPair<String, Long>(StringHelper.toHexString(md5.digest()), new Long(totalLength));
//Create an ordered pair whose first element is the MD5 digest as a (lowercase) hex String
}
catch(IOException e) {
logger.error("concatentateObjects unexpected exception " + e.getMessage(), e);

View File

@ -19,7 +19,7 @@ import org.apache.axis2.context.ConfigurationContext;
import org.apache.axis2.description.AxisService;
import org.apache.axis2.engine.ServiceLifeCycle;
import com.cloud.bridge.service.ServiceProvider;
import com.cloud.bridge.service.controller.s3.ServiceProvider;
/**
* @author Kelven Yang

View File

@ -19,7 +19,8 @@ import java.io.Serializable;
import java.util.Date;
/**
* @author Kelven Yang
* @author Kelven Yang, John Zucker
* A model of stored ACLs to remember the ACL permissions per canonicalUserID and grantee
*/
public class SAcl implements Serializable {
private static final long serialVersionUID = 7900837117165018850L;

View File

@ -76,7 +76,7 @@ public class SObjectItem implements Serializable {
}
public void setStoredPath(String storedPath) {
this.storedPath = storedPath;
this.storedPath = storedPath; // TODO - storedpath holds integer, called from S3Engine.allocObjectItem
}
public long getStoredSize() {

View File

@ -28,7 +28,7 @@ import org.hibernate.Session;
import org.hibernate.Transaction;
import com.cloud.bridge.util.CloudSessionFactory;
import com.cloud.bridge.util.Tuple;
import com.cloud.bridge.util.OrderedPair;
/**
* @author Kelven Yang
@ -212,7 +212,7 @@ public class PersistContext {
@SuppressWarnings("deprecation")
private static Connection getJDBCConnection(String name, boolean allocNew) {
String registryKey = "JDBC-Connection." + name;
Tuple<Session, Connection> info = (Tuple<Session, Connection>)getThreadStoreObject(registryKey);
OrderedPair<Session, Connection> info = (OrderedPair<Session, Connection>)getThreadStoreObject(registryKey);
if(info == null && allocNew) {
Session session = sessionFactory.openSession();
Connection connection = session.connection();
@ -234,7 +234,7 @@ public class PersistContext {
return null;
}
registerThreadStoreObject(registryKey, new Tuple<Session, Connection>(session, connection));
registerThreadStoreObject(registryKey, new OrderedPair<Session, Connection>(session, connection));
return connection;
}
@ -246,7 +246,7 @@ public class PersistContext {
private static void releaseJDBCConnection(String name) {
String registryKey = "JDBC-Connection." + name;
Tuple<Session, Connection> info = (Tuple<Session, Connection>)unregisterThreadStoreObject(registryKey);
OrderedPair<Session, Connection> info = (OrderedPair<Session, Connection>)unregisterThreadStoreObject(registryKey);
if(info != null) {
try {
info.getSecond().close();
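The registerThreadStoreObject/getThreadStoreObject/unregisterThreadStoreObject helpers are not shown in this diff. A plausible sketch of the per-thread registry they imply (method names taken from the calls above, implementation assumed): each thread keeps its own key-to-object map, so the Session/Connection pair registered under "JDBC-Connection.<name>" stays private to that thread.

import java.util.HashMap;
import java.util.Map;

// Assumed implementation sketch of the thread store used by getJDBCConnection.
public class ThreadStore {
    private static final ThreadLocal<Map<String, Object>> STORE =
            new ThreadLocal<Map<String, Object>>() {
                @Override
                protected Map<String, Object> initialValue() {
                    return new HashMap<String, Object>();
                }
            };

    public static void registerThreadStoreObject(String key, Object value) {
        STORE.get().put(key, value);
    }

    public static Object getThreadStoreObject(String key) {
        return STORE.get().get(key);
    }

    public static Object unregisterThreadStoreObject(String key) {
        return STORE.get().remove(key);
    }
}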

View File

@ -37,7 +37,7 @@ import com.cloud.bridge.service.core.s3.S3MetaDataEntry;
import com.cloud.bridge.service.core.s3.S3MultipartPart;
import com.cloud.bridge.service.core.s3.S3MultipartUpload;
import com.cloud.bridge.util.ConfigurationHelper;
import com.cloud.bridge.util.Tuple;
import com.cloud.bridge.util.OrderedPair;
public class MultipartLoadDao {
public static final Logger logger = Logger.getLogger(MultipartLoadDao.class);
@ -74,7 +74,7 @@ public class MultipartLoadDao {
* @return creator of the multipart upload, and NameKey of upload
* @throws SQLException, ClassNotFoundException, IllegalAccessException, InstantiationException
*/
public Tuple<String,String> multipartExits( int uploadId )
public OrderedPair<String,String> multipartExits( int uploadId )
throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException
{
PreparedStatement statement = null;
@ -89,7 +89,7 @@ public class MultipartLoadDao {
if ( rs.next()) {
accessKey = rs.getString( "AccessKey" );
nameKey = rs.getString( "NameKey" );
return new Tuple<String,String>( accessKey, nameKey );
return new OrderedPair<String,String>( accessKey, nameKey );
}
else return null;
@ -334,10 +334,10 @@ public class MultipartLoadDao {
* @param prefix - can be null
* @param keyMarker - can be null
* @param uploadIdMarker - can be null, should only be defined if keyMarker is not-null
* @return Tuple<S3MultipartUpload[], isTruncated>
* @return OrderedPair<S3MultipartUpload[], isTruncated>
* @throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException
*/
public Tuple<S3MultipartUpload[],Boolean> getInitiatedUploads( String bucketName, int maxParts, String prefix, String keyMarker, String uploadIdMarker )
public OrderedPair<S3MultipartUpload[],Boolean> getInitiatedUploads( String bucketName, int maxParts, String prefix, String keyMarker, String uploadIdMarker )
throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException
{
S3MultipartUpload[] inProgress = new S3MultipartUpload[maxParts];
@ -383,7 +383,7 @@ public class MultipartLoadDao {
statement.close();
if (i < maxParts) inProgress = (S3MultipartUpload[])resizeArray(inProgress,i);
return new Tuple<S3MultipartUpload[], Boolean>(inProgress, isTruncated);
return new OrderedPair<S3MultipartUpload[], Boolean>(inProgress, isTruncated);
} finally {
closeConnection();
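A hypothetical caller of getInitiatedUploads, mirroring the usage in S3BucketAction later in this diff; the bucket name and paging values here are illustrative only:

import com.cloud.bridge.persist.dao.MultipartLoadDao;
import com.cloud.bridge.service.core.s3.S3MultipartUpload;
import com.cloud.bridge.util.OrderedPair;

public class ListUploadsExample {
    // Lists up to 1000 in-progress uploads for a hypothetical bucket.
    public static void main(String[] args) throws Exception {
        MultipartLoadDao uploadDao = new MultipartLoadDao();
        OrderedPair<S3MultipartUpload[], Boolean> result =
                uploadDao.getInitiatedUploads("mybucket", 1000, null, null, null);
        S3MultipartUpload[] uploads = result.getFirst();
        boolean isTruncated = result.getSecond().booleanValue();
        System.out.println(uploads.length + " uploads, truncated=" + isTruncated);
    }
}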

View File

@ -95,6 +95,7 @@ import com.amazon.ec2.TerminateInstancesResponse;
import com.cloud.bridge.model.UserCredentials;
import com.cloud.bridge.persist.dao.OfferingDao;
import com.cloud.bridge.persist.dao.UserCredentialsDao;
import com.cloud.bridge.service.controller.s3.ServiceProvider;
import com.cloud.bridge.service.core.ec2.EC2AssociateAddress;
import com.cloud.bridge.service.core.ec2.EC2AuthorizeRevokeSecurityGroup;
import com.cloud.bridge.service.core.ec2.EC2CreateImage;

View File

@ -18,6 +18,7 @@ package com.cloud.bridge.service;
import org.apache.log4j.Logger;
import com.amazon.ec2.*;
import com.cloud.bridge.service.controller.s3.ServiceProvider;
public class EC2SoapService implements AmazonEC2SkeletonInterface {
protected final static Logger logger = Logger.getLogger(EC2SoapService.class);

View File

@ -1,42 +0,0 @@
/*
* Copyright (C) 2011 Citrix Systems, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloud.bridge.service;
import java.io.IOException;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.callback.UnsupportedCallbackException;
import org.apache.ws.security.WSPasswordCallback;
public class PWCBHandler implements CallbackHandler {
@SuppressWarnings("deprecation")
public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
for (int i = 0; i < callbacks.length; i++) {
WSPasswordCallback pwcb = (WSPasswordCallback)callbacks[i];
String id = pwcb.getIdentifer();
if ( "client".equals(id)) {
pwcb.setPassword("apache");
}
else if("service".equals(id)) {
pwcb.setPassword("apache");
}
}
}
}

View File

@ -44,6 +44,8 @@ import com.cloud.bridge.persist.PersistContext;
import com.cloud.bridge.persist.dao.UserCredentialsDao;
import com.cloud.bridge.service.controller.s3.S3BucketAction;
import com.cloud.bridge.service.controller.s3.S3ObjectAction;
import com.cloud.bridge.service.controller.s3.ServiceProvider;
import com.cloud.bridge.service.controller.s3.ServletAction;
import com.cloud.bridge.service.core.s3.S3AccessControlList;
import com.cloud.bridge.service.core.s3.S3AuthParams;
import com.cloud.bridge.service.core.s3.S3Engine;
@ -316,8 +318,6 @@ public class S3RestServlet extends HttpServlet {
return;
}
// TODO - Remove soon -> turn off auth - just for testing
//UserContext.current().initContext("Mark", "123", "Mark", "testing", request);
} catch (SignatureException e) {
throw new PermissionDeniedException(e);

View File

@ -1,115 +0,0 @@
/*
* Copyright (C) 2011 Citrix Systems, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloud.bridge.service;
import org.apache.axis2.AxisFault;
import org.apache.log4j.Logger;
import com.amazon.s3.*;
/**
* @author Kelven Yang
*/
public class S3SoapService implements AmazonS3SkeletonInterface {
protected final static Logger logger = Logger.getLogger(S3SoapService.class);
public GetBucketLoggingStatusResponse getBucketLoggingStatus(GetBucketLoggingStatus getBucketLoggingStatus0) {
AmazonS3SkeletonInterface s3Service = ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class);
return s3Service.getBucketLoggingStatus(getBucketLoggingStatus0);
}
public CopyObjectResponse copyObject(com.amazon.s3.CopyObject copyObject2) throws AxisFault {
AmazonS3SkeletonInterface s3Service = ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class);
return s3Service.copyObject(copyObject2);
}
public GetBucketAccessControlPolicyResponse getBucketAccessControlPolicy (
GetBucketAccessControlPolicy getBucketAccessControlPolicy4) {
AmazonS3SkeletonInterface s3Service = ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class);
return s3Service.getBucketAccessControlPolicy (getBucketAccessControlPolicy4);
}
public ListBucketResponse listBucket (ListBucket listBucket6) {
AmazonS3SkeletonInterface s3Service = ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class);
return s3Service.listBucket (listBucket6);
}
public PutObjectResponse putObject(PutObject putObject8) {
AmazonS3SkeletonInterface s3Service = ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class);
return s3Service.putObject(putObject8);
}
public CreateBucketResponse createBucket (CreateBucket createBucket) {
AmazonS3SkeletonInterface s3Service = ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class);
return s3Service.createBucket(createBucket);
}
public ListAllMyBucketsResponse listAllMyBuckets (
ListAllMyBuckets listAllMyBuckets12) {
AmazonS3SkeletonInterface s3Service = ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class);
return s3Service.listAllMyBuckets (listAllMyBuckets12);
}
public GetObjectResponse getObject(com.amazon.s3.GetObject getObject14) {
AmazonS3SkeletonInterface s3Service = ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class);
return s3Service.getObject(getObject14);
}
public DeleteBucketResponse deleteBucket(DeleteBucket deleteBucket16) {
AmazonS3SkeletonInterface s3Service = ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class);
return s3Service.deleteBucket(deleteBucket16);
}
public SetBucketLoggingStatusResponse setBucketLoggingStatus(
SetBucketLoggingStatus setBucketLoggingStatus18) {
AmazonS3SkeletonInterface s3Service = ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class);
return s3Service.setBucketLoggingStatus(setBucketLoggingStatus18);
}
public GetObjectAccessControlPolicyResponse getObjectAccessControlPolicy(
GetObjectAccessControlPolicy getObjectAccessControlPolicy20) {
AmazonS3SkeletonInterface s3Service = ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class);
return s3Service.getObjectAccessControlPolicy(getObjectAccessControlPolicy20);
}
public DeleteObjectResponse deleteObject (DeleteObject deleteObject22) {
AmazonS3SkeletonInterface s3Service = ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class);
return s3Service.deleteObject (deleteObject22);
}
public SetBucketAccessControlPolicyResponse setBucketAccessControlPolicy(
SetBucketAccessControlPolicy setBucketAccessControlPolicy24) {
AmazonS3SkeletonInterface s3Service = ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class);
return s3Service.setBucketAccessControlPolicy(setBucketAccessControlPolicy24);
}
public SetObjectAccessControlPolicyResponse setObjectAccessControlPolicy(
SetObjectAccessControlPolicy setObjectAccessControlPolicy26) {
AmazonS3SkeletonInterface s3Service = ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class);
return s3Service.setObjectAccessControlPolicy(setObjectAccessControlPolicy26);
}
public PutObjectInlineResponse putObjectInline (PutObjectInline putObjectInline28) {
AmazonS3SkeletonInterface s3Service = ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class);
return s3Service.putObjectInline (putObjectInline28);
}
public GetObjectExtendedResponse getObjectExtended(GetObjectExtended getObjectExtended30) {
AmazonS3SkeletonInterface s3Service = ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class);
return s3Service.getObjectExtended(getObjectExtended30);
}
}

View File

@ -33,7 +33,7 @@ public class UserContext {
private boolean annonymous = false;
private String accessKey;
private String secretKey;
private String canonicalUserId; // -> for us this is the accessKey
private String canonicalUserId; // In our design, we re-use the accessKey to provide the canonicalUserId -- TODO loPri - reconsider?
private String description;
private HttpServletRequest request = null;

View File

@ -25,21 +25,13 @@ import java.io.StringWriter;
import java.io.Writer;
import java.util.Calendar;
import javax.servlet.ServletOutputStream;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.xml.bind.DatatypeConverter;
import javax.xml.namespace.QName;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.stream.XMLOutputFactory;
import javax.xml.stream.XMLStreamException;
import javax.xml.stream.XMLStreamWriter;
import org.apache.axiom.om.OMAbstractFactory;
import org.apache.axiom.om.OMFactory;
import org.apache.axis2.databinding.utils.writer.MTOMAwareXMLSerializer;
import org.apache.log4j.Logger;
import org.json.simple.parser.ParseException;
@ -58,9 +50,6 @@ import com.cloud.bridge.persist.dao.MultipartLoadDao;
import com.cloud.bridge.persist.dao.SBucketDao;
import com.cloud.bridge.service.S3Constants;
import com.cloud.bridge.service.S3RestServlet;
import com.cloud.bridge.service.S3SoapServiceImpl;
import com.cloud.bridge.service.ServiceProvider;
import com.cloud.bridge.service.ServletAction;
import com.cloud.bridge.service.UserContext;
import com.cloud.bridge.service.core.s3.S3AccessControlPolicy;
import com.cloud.bridge.service.core.s3.S3BucketPolicy;
@ -91,7 +80,7 @@ import com.cloud.bridge.service.exception.PermissionDeniedException;
import com.cloud.bridge.util.Converter;
import com.cloud.bridge.util.PolicyParser;
import com.cloud.bridge.util.StringHelper;
import com.cloud.bridge.util.Tuple;
import com.cloud.bridge.util.OrderedPair;
import com.cloud.bridge.util.XSerializer;
import com.cloud.bridge.util.XSerializerXmlAdapter;
@ -103,9 +92,6 @@ public class S3BucketAction implements ServletAction {
protected final static Logger logger = Logger.getLogger(S3BucketAction.class);
private DocumentBuilderFactory dbf = null;
private OMFactory factory = OMAbstractFactory.getOMFactory();
private XMLOutputFactory xmlOutFactory = XMLOutputFactory.newInstance();
public S3BucketAction() {
dbf = DocumentBuilderFactory.newInstance();
dbf.setNamespaceAware( true );
@ -400,7 +386,7 @@ public class S3BucketAction implements ServletAction {
S3ListAllMyBucketsResponse engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest);
// To allow the all buckets list to be serialized via Axiom classes
ListAllMyBucketsResponse allBuckets = S3SoapServiceImpl.toListAllMyBucketsResponse( engineResponse );
ListAllMyBucketsResponse allBuckets = S3SerializableServiceImplementation.toListAllMyBucketsResponse( engineResponse );
OutputStream outputStream = response.getOutputStream();
response.setStatus(200);
@ -429,7 +415,7 @@ public class S3BucketAction implements ServletAction {
S3ListBucketResponse engineResponse = ServiceProvider.getInstance().getS3Engine().listBucketContents( engineRequest, false );
// To allow the all list buckets result to be serialized via Axiom classes
ListBucketResponse oneBucket = S3SoapServiceImpl.toListBucketResponse( engineResponse );
ListBucketResponse oneBucket = S3SerializableServiceImplementation.toListBucketResponse( engineResponse );
OutputStream outputStream = response.getOutputStream();
response.setStatus(200);
@ -452,13 +438,13 @@ public class S3BucketAction implements ServletAction {
cal.set( 1970, 1, 1 );
engineRequest.setAccessKey(UserContext.current().getAccessKey());
engineRequest.setRequestTimestamp( cal );
engineRequest.setSignature( "" );
engineRequest.setSignature( "" ); // TODO - Provide signature
engineRequest.setBucketName((String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY));
S3AccessControlPolicy engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest);
// To allow the bucket acl policy result to be serialized via Axiom classes
GetBucketAccessControlPolicyResponse onePolicy = S3SoapServiceImpl.toGetBucketAccessControlPolicyResponse( engineResponse );
GetBucketAccessControlPolicyResponse onePolicy = S3SerializableServiceImplementation.toGetBucketAccessControlPolicyResponse( engineResponse );
OutputStream outputStream = response.getOutputStream();
response.setStatus(200);
@ -757,7 +743,7 @@ public class S3BucketAction implements ServletAction {
}
public void executePutBucketWebsite(HttpServletRequest request, HttpServletResponse response) throws IOException {
// TODO -- HiPri - Undertake checks on Put Bucket Website
// TODO -- LoPri - Undertake checks on Put Bucket Website
// Tested using configuration <Directory /Users/john1/S3-Mount>\nAllowOverride FileInfo AuthConfig Limit...</Directory> in httpd.conf
// Need some way of using AllowOverride to allow use of .htaccess and then pushing the .htaccess file to a bucket subdirectory of the mount point
// Currently has noop effect in the sense that a running apachectl process sees the directory contents without further action
@ -825,7 +811,7 @@ public class S3BucketAction implements ServletAction {
// [B] Query the multipart table to get the list of current uploads
try {
MultipartLoadDao uploadDao = new MultipartLoadDao();
Tuple<S3MultipartUpload[],Boolean> result = uploadDao.getInitiatedUploads( bucketName, maxUploads, prefix, keyMarker, uploadIdMarker );
OrderedPair<S3MultipartUpload[],Boolean> result = uploadDao.getInitiatedUploads( bucketName, maxUploads, prefix, keyMarker, uploadIdMarker );
uploads = result.getFirst();
isTruncated = result.getSecond().booleanValue();
}

View File

@ -30,17 +30,11 @@ import javax.activation.DataHandler;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.xml.bind.DatatypeConverter;
import javax.xml.namespace.QName;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.stream.XMLOutputFactory;
import javax.xml.stream.XMLStreamException;
import javax.xml.stream.XMLStreamWriter;
import org.apache.axiom.om.OMAbstractFactory;
import org.apache.axiom.om.OMFactory;
import org.apache.axis2.databinding.utils.writer.MTOMAwareXMLSerializer;
import org.apache.log4j.Logger;
import org.w3c.dom.Document;
import org.w3c.dom.Node;
@ -55,9 +49,6 @@ import com.cloud.bridge.persist.dao.MultipartLoadDao;
import com.cloud.bridge.persist.dao.SBucketDao;
import com.cloud.bridge.service.S3Constants;
import com.cloud.bridge.service.S3RestServlet;
import com.cloud.bridge.service.S3SoapServiceImpl;
import com.cloud.bridge.service.ServiceProvider;
import com.cloud.bridge.service.ServletAction;
import com.cloud.bridge.service.UserContext;
import com.cloud.bridge.service.core.s3.S3AccessControlPolicy;
import com.cloud.bridge.service.core.s3.S3AuthParams;
@ -83,7 +74,7 @@ import com.cloud.bridge.util.Converter;
import com.cloud.bridge.util.DateHelper;
import com.cloud.bridge.util.HeaderParam;
import com.cloud.bridge.util.ServletRequestDataSource;
import com.cloud.bridge.util.Tuple;
import com.cloud.bridge.util.OrderedPair;
/**
* @author Kelven Yang, John Zucker
@ -92,8 +83,6 @@ public class S3ObjectAction implements ServletAction {
protected final static Logger logger = Logger.getLogger(S3ObjectAction.class);
private DocumentBuilderFactory dbf = null;
private OMFactory factory = OMAbstractFactory.getOMFactory();
private XMLOutputFactory xmlOutFactory = XMLOutputFactory.newInstance();
public S3ObjectAction() {
dbf = DocumentBuilderFactory.newInstance();
@ -216,7 +205,7 @@ public class S3ObjectAction implements ServletAction {
if (null != versionId) response.addHeader( "x-amz-version-id", versionId );
// To allow the copy object result to be serialized via Axiom classes
CopyObjectResponse allBuckets = S3SoapServiceImpl.toCopyObjectResponse( engineResponse );
CopyObjectResponse allBuckets = S3SerializableServiceImplementation.toCopyObjectResponse( engineResponse );
OutputStream outputStream = response.getOutputStream();
response.setStatus(200);
@ -255,7 +244,7 @@ public class S3ObjectAction implements ServletAction {
// To allow the get object acl policy result to be serialized via Axiom classes
GetObjectAccessControlPolicyResponse onePolicy = S3SoapServiceImpl.toGetObjectAccessControlPolicyResponse( engineResponse );
GetObjectAccessControlPolicyResponse onePolicy = S3SerializableServiceImplementation.toGetObjectAccessControlPolicyResponse( engineResponse );
OutputStream outputStream = response.getOutputStream();
response.setStatus(200);
@ -273,21 +262,29 @@ public class S3ObjectAction implements ServletAction {
{
S3PutObjectRequest putRequest = null;
// -> reuse the Access Control List parsing code that was added to support DIME
String ACLsetting = request.getHeader("x-amz-acl");
String bucketName = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY);
String key = (String)request.getAttribute(S3Constants.OBJECT_ATTR_KEY);
try {
putRequest = S3RestServlet.toEnginePutObjectRequest( request.getInputStream());
}
catch( Exception e ) {
throw new IOException( e.toString());
}
if ( null == ACLsetting )
// -> reuse the Access Control List parsing code that was added to support DIME
try {
putRequest = S3RestServlet.toEnginePutObjectRequest( request.getInputStream());
}
catch( Exception e ) {
throw new IOException( e.toString());
}
// -> reuse the SOAP code to save the passed in ACLs
S3SetObjectAccessControlPolicyRequest engineRequest = new S3SetObjectAccessControlPolicyRequest();
engineRequest.setBucketName( bucketName );
engineRequest.setKey( key );
engineRequest.setAcl( putRequest.getAcl());
// if (null == putRequest)
// engineRequest.setAcl (an S3AccessContolList) // (ACLsetting);
// else
engineRequest.setAcl( putRequest.getAcl());
// -> is this a request for a specific version of the object? look for "versionId=" in the query string
String queryString = request.getQueryString();
@ -357,7 +354,7 @@ public class S3ObjectAction implements ServletAction {
S3RestServlet.writeResponse(response, "HTTP/1.1 100 Continue\r\n");
}
String contentType = request.getHeader( "Content-Type" );
// String contentType = request.getHeader( "Content-Type" ); TODO - Needed?
long contentLength = Converter.toLong(request.getHeader("Content-Length"), 0);
String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY);
@ -644,7 +641,7 @@ public class S3ObjectAction implements ServletAction {
long contentLength = Converter.toLong(request.getHeader("Content-Length"), 0);
String md5 = request.getHeader( "Content-MD5" );
// String md5 = request.getHeader( "Content-MD5" ); TODO - Needed?
String temp = request.getParameter("uploadId");
if (null != temp) uploadId = Integer.parseInt( temp );
@ -753,7 +750,7 @@ public class S3ObjectAction implements ServletAction {
// [C] Parse the given XML body part and perform error checking
Tuple<Integer,String> match = verifyParts( request.getInputStream(), parts );
OrderedPair<Integer,String> match = verifyParts( request.getInputStream(), parts );
if (200 != match.getFirst().intValue()) {
response.setStatus(match.getFirst().intValue());
returnErrorXML( match.getFirst().intValue(), match.getSecond(), outputStream );
@ -840,7 +837,7 @@ public class S3ObjectAction implements ServletAction {
try {
MultipartLoadDao uploadDao = new MultipartLoadDao();
Tuple<String,String> exists = uploadDao.multipartExits( uploadId );
OrderedPair<String,String> exists = uploadDao.multipartExits( uploadId );
if (null == exists) {
response.setStatus(404);
return;
@ -1122,7 +1119,7 @@ public class S3ObjectAction implements ServletAction {
* @return error code, and error string
* @throws ParserConfigurationException, IOException, SAXException
*/
private Tuple<Integer,String> verifyParts( InputStream is, S3MultipartPart[] parts )
private OrderedPair<Integer,String> verifyParts( InputStream is, S3MultipartPart[] parts )
{
try {
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
@ -1147,7 +1144,7 @@ public class S3ObjectAction implements ServletAction {
nodeSet = doc.getElementsByTagName( "Part" );
count = nodeSet.getLength();
}
if (count != parts.length) return new Tuple<Integer, String>(400, "InvalidPart");
if (count != parts.length) return new OrderedPair<Integer, String>(400, "InvalidPart");
// -> get a list of all the children elements of the 'Part' parent element
for( int i=0; i < count; i++ )
@ -1179,20 +1176,20 @@ public class S3ObjectAction implements ServletAction {
// -> do the parts given in the call XML match what was previously uploaded?
if (lastNumber >= partNumber) {
return new Tuple<Integer, String>(400, "InvalidPartOrder");
return new OrderedPair<Integer, String>(400, "InvalidPartOrder");
}
if (partNumber != parts[i].getPartNumber() ||
eTag == null ||
!eTag.equalsIgnoreCase( "\"" + parts[i].getETag() + "\"" )) {
return new Tuple<Integer, String>(400, "InvalidPart");
return new OrderedPair<Integer, String>(400, "InvalidPart");
}
lastNumber = partNumber;
}
return new Tuple<Integer, String>(200, "Success");
return new OrderedPair<Integer, String>(200, "Success");
}
catch( Exception e ) {
return new Tuple<Integer, String>(500, e.toString());
return new OrderedPair<Integer, String>(500, e.toString());
}
}
}
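For reference, the body that verifyParts parses is the standard S3 CompleteMultipartUpload request. A minimal illustrative payload built as a Java string (the ETag values below are hypothetical); part numbers must be strictly ascending and each quoted ETag must match the ETag recorded when that part was uploaded:

// Illustrative CompleteMultipartUpload body checked by verifyParts.
String completeBody =
        "<CompleteMultipartUpload>"
      + "<Part><PartNumber>1</PartNumber><ETag>\"9b2cf535f27731c974343645a3985328\"</ETag></Part>"
      + "<Part><PartNumber>2</PartNumber><ETag>\"6f5902ac237024bdd0c176cb93063dc4\"</ETag></Part>"
      + "</CompleteMultipartUpload>";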

View File

@ -13,11 +13,9 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloud.bridge.service;
package com.cloud.bridge.service.controller.s3;
import java.io.IOException;
import java.util.Calendar;
import java.util.TimeZone;
import org.apache.axis2.AxisFault;
import org.apache.log4j.Logger;
@ -108,12 +106,38 @@ import com.cloud.bridge.service.core.s3.S3SetBucketAccessControlPolicyRequest;
import com.cloud.bridge.service.core.s3.S3SetObjectAccessControlPolicyRequest;
import com.cloud.bridge.service.exception.InternalErrorException;
public class S3SoapServiceImpl implements AmazonS3SkeletonInterface {
protected final static Logger logger = Logger.getLogger(S3SoapServiceImpl.class);
/* @author Kelven Yang, John Zucker
* Implementation of S3 service requests as operations defined by the interface, com.amazon.s3.AmazonS3SkeletonInterface.
* The operations dispatched from this class are of the form of SOAP operations which define business logic to be executed by the request.
* The methods required for S3 services in accordance with the skeleton are either implementations of the following
* getBucketLoggingStatus
* copyObject
* getBucketAccessControlPolicy
* listBucket
* putObject
* createBucket
* listAllMyBuckets
* getObject
* deleteBucket
* setBucketLoggingStatus
* getObjectAccessControlPolicy
* deleteObject
* setBucketAccessControlPolicy
* setObjectAccessControlPolicy
* putObjectInline
* getObjectExtended
* or throw an Axis2 fault otherwise.
* These skeleton methods can be used as the implementation of services to satisfy SOAP calls, but also to provide the output
* to be serialized by the AXIOM XML processor.
*
* */
public class S3SerializableServiceImplementation implements AmazonS3SkeletonInterface {
protected final static Logger logger = Logger.getLogger(S3SerializableServiceImplementation.class);
private S3Engine engine;
public S3SoapServiceImpl(S3Engine engine) {
public S3SerializableServiceImplementation(S3Engine engine) {
this.engine = engine;
}
@ -152,7 +176,7 @@ public class S3SoapServiceImpl implements AmazonS3SkeletonInterface {
public GetBucketAccessControlPolicyResponse getBucketAccessControlPolicy(
GetBucketAccessControlPolicy getBucketAccessControlPolicy) {
// after authentication, we should setup user context
// TODO - after authentication, we should setup user context
return toGetBucketAccessControlPolicyResponse(engine.handleRequest(
toEngineGetBucketAccessControlPolicyRequest(getBucketAccessControlPolicy)));
}
@ -182,7 +206,7 @@ public class S3SoapServiceImpl implements AmazonS3SkeletonInterface {
request.setBucketName(setBucketAccessControlPolicy.getBucket());
request.setAcl(toEngineAccessControlList(setBucketAccessControlPolicy.getAccessControlList()));
S3Response basicResponse = engine.handleRequest(request);
// S3Response basicResponse = engine.handleRequest(request); TODO - Needed?
SetBucketAccessControlPolicyResponse response = new SetBucketAccessControlPolicyResponse();
return response;
}

View File

@ -15,7 +15,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package com.cloud.bridge.service;
package com.cloud.bridge.service.controller.s3;
import java.io.File;
@ -48,6 +48,8 @@ import com.cloud.bridge.persist.PersistException;
import com.cloud.bridge.persist.dao.MHostDao;
import com.cloud.bridge.persist.dao.SHostDao;
import com.cloud.bridge.persist.dao.UserCredentialsDao;
import com.cloud.bridge.service.EC2SoapServiceImpl;
import com.cloud.bridge.service.UserInfo;
import com.cloud.bridge.service.core.ec2.EC2Engine;
import com.cloud.bridge.service.core.s3.S3BucketPolicy;
import com.cloud.bridge.service.core.s3.S3Engine;
@ -55,7 +57,7 @@ import com.cloud.bridge.service.exception.ConfigurationException;
import com.cloud.bridge.util.ConfigurationHelper;
import com.cloud.bridge.util.DateHelper;
import com.cloud.bridge.util.NetHelper;
import com.cloud.bridge.util.Tuple;
import com.cloud.bridge.util.OrderedPair;
/**
* @author Kelven Yang
@ -85,7 +87,7 @@ public class ServiceProvider {
// register service implementation object
engine = new S3Engine();
EC2_engine = new EC2Engine();
serviceMap.put(AmazonS3SkeletonInterface.class, new S3SoapServiceImpl(engine));
serviceMap.put(AmazonS3SkeletonInterface.class, new S3SerializableServiceImplementation(engine));
serviceMap.put(AmazonEC2SkeletonInterface.class, new EC2SoapServiceImpl(EC2_engine));
}
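Given the registrations above, getServiceImpl presumably resolves a skeleton implementation from serviceMap by its interface class. A minimal sketch, inferred from the serviceMap.put(...) calls here and the ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class) calls elsewhere in this diff; the real method may add proxying or logging:

@SuppressWarnings("unchecked")
public <T> T getServiceImpl(Class<T> serviceInterface) {
    // Sketch only: look up the implementation registered for this interface.
    return (T) serviceMap.get(serviceInterface);
}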
@ -117,18 +119,18 @@ public class ServiceProvider {
* We return a tuple to distinguish between two cases:
* (1) there is no entry in the map for bucketName, and (2) there is a null entry
* in the map for bucketName. In case 2, the database was inspected for the
* bucket policy but it had none so we remember it here to reduce database lookups.
* bucket policy but it had none so we cache it here to reduce database lookups.
* @param bucketName
* @return Integer in the tuple means: -1 if no policy defined for the bucket, 0 if one defined
* even if its set at null.
*/
public Tuple<S3BucketPolicy,Integer> getBucketPolicy(String bucketName) {
public OrderedPair<S3BucketPolicy,Integer> getBucketPolicy(String bucketName) {
if (policyMap.containsKey( bucketName )) {
S3BucketPolicy policy = policyMap.get( bucketName );
return new Tuple<S3BucketPolicy,Integer>( policy, 0 );
return new OrderedPair<S3BucketPolicy,Integer>( policy, 0 );
}
else return new Tuple<S3BucketPolicy,Integer>( null, -1 );
else return new OrderedPair<S3BucketPolicy,Integer>( null, -1 ); // For case (1) where the map has no entry for bucketName
}
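A hypothetical caller illustrating the two cases; the cache-population step is an assumption for illustration, not code from this commit:

void resolveBucketPolicy(String bucketName) {
    OrderedPair<S3BucketPolicy, Integer> result =
            ServiceProvider.getInstance().getBucketPolicy(bucketName);
    if ( -1 == result.getSecond().intValue() ) {
        // Case (1): no map entry yet -> consult the database once, then cache
        // the outcome (even a null policy); the caching setter's name is assumed.
    }
    else {
        // Case (2): a cached entry exists; getFirst() may legitimately be null,
        // meaning the bucket is known to have no policy.
        S3BucketPolicy policy = result.getFirst();
    }
}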
/**

View File

@ -13,7 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloud.bridge.service;
package com.cloud.bridge.service.controller.s3;
import java.io.IOException;

View File

@ -13,15 +13,14 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloud.bridge.service;
package com.cloud.bridge.service.core.s3;
import java.io.InputStream;
import java.io.OutputStream;
import javax.activation.DataHandler;
import com.cloud.bridge.service.core.s3.S3MultipartPart;
import com.cloud.bridge.util.Tuple;
import com.cloud.bridge.util.OrderedPair;
/**
* @author Kelven Yang
@ -34,5 +33,5 @@ public interface S3BucketAdapter {
DataHandler loadObject(String mountedRoot, String bucket, String fileName);
DataHandler loadObjectRange(String mountedRoot, String bucket, String fileName, long startPos, long endPos);
void deleteObject(String mountedRoot, String bucket, String fileName);
Tuple<String, Long> concatentateObjects(String mountedRoot, String destBucket, String fileName, String sourceBucket, S3MultipartPart[] parts, OutputStream os);
OrderedPair<String, Long> concatentateObjects(String mountedRoot, String destBucket, String fileName, String sourceBucket, S3MultipartPart[] parts, OutputStream os);
}
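A hypothetical consumer of the contract above, unpacking the (MD5 hex, length) pair that concatentateObjects returns:

public void completeUpload(S3BucketAdapter bucketAdapter, String mountedRoot,
        String destBucket, String fileName, String sourceBucket,
        S3MultipartPart[] parts, OutputStream clientStream) {
    OrderedPair<String, Long> result = bucketAdapter.concatentateObjects(
            mountedRoot, destBucket, fileName, sourceBucket, parts, clientStream);
    String md5Hex = result.getFirst();                // hex MD5 of the final object
    long totalBytes = result.getSecond().longValue(); // its length in bytes
}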

View File

@ -39,6 +39,7 @@ import org.hibernate.LockMode;
import org.hibernate.Session;
import org.json.simple.parser.ParseException;
import com.cloud.bridge.io.S3FileSystemBucketAdapter;
import com.cloud.bridge.model.MHost;
import com.cloud.bridge.model.MHostMount;
import com.cloud.bridge.model.SAcl;
@ -58,10 +59,8 @@ import com.cloud.bridge.persist.dao.SHostDao;
import com.cloud.bridge.persist.dao.SMetaDao;
import com.cloud.bridge.persist.dao.SObjectDao;
import com.cloud.bridge.persist.dao.SObjectItemDao;
import com.cloud.bridge.service.S3BucketAdapter;
import com.cloud.bridge.service.S3FileSystemBucketAdapter;
import com.cloud.bridge.service.ServiceProvider;
import com.cloud.bridge.service.UserContext;
import com.cloud.bridge.service.controller.s3.ServiceProvider;
import com.cloud.bridge.service.core.s3.S3BucketPolicy.PolicyAccess;
import com.cloud.bridge.service.core.s3.S3CopyObjectRequest.MetadataDirective;
import com.cloud.bridge.service.core.s3.S3PolicyAction.PolicyActions;
@ -78,10 +77,10 @@ import com.cloud.bridge.service.exception.UnsupportedException;
import com.cloud.bridge.util.DateHelper;
import com.cloud.bridge.util.PolicyParser;
import com.cloud.bridge.util.StringHelper;
import com.cloud.bridge.util.Tuple;
import com.cloud.bridge.util.OrderedPair;
/**
* @author Kelven Yang
* @author Kelven Yang, John Zucker
*/
public class S3Engine {
protected final static Logger logger = Logger.getLogger(S3Engine.class);
@ -172,7 +171,7 @@ public class S3Engine {
if (PersistContext.acquireNamedLock("bucket.creation", LOCK_ACQUIRING_TIMEOUT_SECONDS))
{
Tuple<SHost, String> shostTuple = null;
OrderedPair<SHost, String> shostTuple = null;
boolean success = false;
try {
SBucketDao bucketDao = new SBucketDao();
@ -249,20 +248,20 @@ public class S3Engine {
// -> delete the file
Tuple<SHost, String> tupleBucketHost = getBucketStorageHost(sbucket);
S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(tupleBucketHost.getFirst());
bucketAdapter.deleteContainer(tupleBucketHost.getSecond(), request.getBucketName());
OrderedPair<SHost, String> host_storagelocation_pair = getBucketStorageHost(sbucket);
S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
bucketAdapter.deleteContainer(host_storagelocation_pair.getSecond(), request.getBucketName());
// -> cascade-deleting can delete related SObject/SObjectItem objects, but not SAcl, SMeta and policy objects. We
// need to perform deletion of these objects related to bucket manually.
// Delete SMeta & SAcl objects: (1)Get all the objects in the bucket, (2)then all the items in each object, (3) then all meta & acl data for each item
Set<SObject> objectsInBucket = sbucket.getObjectsInBucket();
Iterator it = objectsInBucket.iterator();
Iterator<SObject> it = objectsInBucket.iterator();
while( it.hasNext())
{
SObject oneObject = (SObject)it.next();
Set<SObjectItem> itemsInObject = oneObject.getItems();
Iterator is = itemsInObject.iterator();
Iterator<SObjectItem> is = itemsInObject.iterator();
while( is.hasNext())
{
SObjectItem oneItem = (SObjectItem)is.next();
@ -446,12 +445,12 @@ public class S3Engine {
return 404;
}
Tuple<SHost, String> tupleBucketHost = getBucketStorageHost(bucket);
S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(tupleBucketHost.getFirst());
OrderedPair<SHost, String> host_storagelocation_pair = getBucketStorageHost(bucket);
S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
try {
MultipartLoadDao uploadDao = new MultipartLoadDao();
Tuple<String,String> exists = uploadDao.multipartExits( uploadId );
OrderedPair<String,String> exists = uploadDao.multipartExits( uploadId );
if (null == exists) {
logger.error( "initiateMultipartUpload failed since multipart upload" + uploadId + " does not exist" );
return 404;
@ -474,7 +473,7 @@ public class S3Engine {
S3MultipartPart[] parts = uploadDao.getParts( uploadId, 10000, 0 );
for( int i=0; i < parts.length; i++ )
{
bucketAdapter.deleteObject( tupleBucketHost.getSecond(), ServiceProvider.getInstance().getMultipartDir(), parts[i].getPath());
bucketAdapter.deleteObject( host_storagelocation_pair.getSecond(), ServiceProvider.getInstance().getMultipartDir(), parts[i].getPath());
}
uploadDao.deleteUpload( uploadId );
@ -558,14 +557,14 @@ public class S3Engine {
context.setKeyName( request.getKey());
verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE );
Tuple<SHost, String> tupleBucketHost = getBucketStorageHost(bucket);
S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(tupleBucketHost.getFirst());
OrderedPair<SHost, String> host_storagelocation_pair = getBucketStorageHost(bucket);
S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
String itemFileName = new String( uploadId + "-" + partNumber );
InputStream is = null;
try {
is = request.getDataInputStream();
String md5Checksum = bucketAdapter.saveObject(is, tupleBucketHost.getSecond(), ServiceProvider.getInstance().getMultipartDir(), itemFileName);
String md5Checksum = bucketAdapter.saveObject(is, host_storagelocation_pair.getSecond(), ServiceProvider.getInstance().getMultipartDir(), itemFileName);
response.setETag(md5Checksum);
MultipartLoadDao uploadDao = new MultipartLoadDao();
@ -622,17 +621,17 @@ public class S3Engine {
// [B] Now we need to create the final re-assembled object
// -> the allocObjectItem checks for the bucket policy PutObject permissions
Tuple<SObject, SObjectItem> tupleObjectItem = allocObjectItem(bucket, key, meta, null, request.getCannedAccess());
Tuple<SHost, String> tupleBucketHost = getBucketStorageHost(bucket);
OrderedPair<SObject, SObjectItem> object_objectitem_pair = allocObjectItem(bucket, key, meta, null, request.getCannedAccess());
OrderedPair<SHost, String> host_storagelocation_pair = getBucketStorageHost(bucket);
S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(tupleBucketHost.getFirst());
String itemFileName = tupleObjectItem.getSecond().getStoredPath();
S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
String itemFileName = object_objectitem_pair.getSecond().getStoredPath();
// -> Amazon defines that we must return a 200 response immediately to the client, but
// -> we don't know the version header until we hit here
httpResp.setStatus(200);
httpResp.setContentType("text/xml; charset=UTF-8");
String version = tupleObjectItem.getSecond().getVersion();
String version = object_objectitem_pair.getSecond().getVersion();
if (null != version) httpResp.addHeader( "x-amz-version-id", version );
httpResp.flushBuffer();
@ -642,12 +641,12 @@ public class S3Engine {
// explicit transaction control to avoid holding transaction during long file concatenation process
PersistContext.commitTransaction();
Tuple<String, Long> result = bucketAdapter.concatentateObjects( tupleBucketHost.getSecond(), bucket.getName(), itemFileName, ServiceProvider.getInstance().getMultipartDir(), parts, os );
OrderedPair<String, Long> result = bucketAdapter.concatentateObjects( host_storagelocation_pair.getSecond(), bucket.getName(), itemFileName, ServiceProvider.getInstance().getMultipartDir(), parts, os );
response.setETag(result.getFirst());
response.setLastModified(DateHelper.toCalendar( tupleObjectItem.getSecond().getLastModifiedTime()));
response.setLastModified(DateHelper.toCalendar( object_objectitem_pair.getSecond().getLastModifiedTime()));
SObjectItemDao itemDao = new SObjectItemDao();
SObjectItem item = itemDao.get( tupleObjectItem.getSecond().getId());
SObjectItem item = itemDao.get( object_objectitem_pair.getSecond().getId());
item.setMd5(result.getFirst());
item.setStoredSize(result.getSecond().longValue());
response.setResultCode(200);
@ -674,13 +673,13 @@ public class S3Engine {
if (bucket == null) throw new NoSuchObjectException("Bucket " + bucketName + " does not exist");
// -> is the caller allowed to write the object?
// -> the allocObjectItem checks for the bucket policy PutObject permissions
Tuple<SObject, SObjectItem> tupleObjectItem = allocObjectItem(bucket, key, meta, acl, request.getCannedAccess());
Tuple<SHost, String> tupleBucketHost = getBucketStorageHost(bucket);
// Is the caller allowed to write the object?
// The allocObjectItem checks for the bucket policy PutObject permissions
OrderedPair<SObject, SObjectItem> object_objectitem_pair = allocObjectItem(bucket, key, meta, acl, request.getCannedAccess());
OrderedPair<SHost, String> host_storagelocation_pair = getBucketStorageHost(bucket);
S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(tupleBucketHost.getFirst());
String itemFileName = tupleObjectItem.getSecond().getStoredPath();
S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
String itemFileName = object_objectitem_pair.getSecond().getStoredPath();
InputStream is = null;
try {
@ -688,13 +687,13 @@ public class S3Engine {
PersistContext.commitTransaction();
is = request.getDataInputStream();
String md5Checksum = bucketAdapter.saveObject(is, tupleBucketHost.getSecond(), bucket.getName(), itemFileName);
String md5Checksum = bucketAdapter.saveObject(is, host_storagelocation_pair.getSecond(), bucket.getName(), itemFileName);
response.setETag(md5Checksum);
response.setLastModified(DateHelper.toCalendar( tupleObjectItem.getSecond().getLastModifiedTime()));
response.setVersion( tupleObjectItem.getSecond().getVersion());
response.setLastModified(DateHelper.toCalendar( object_objectitem_pair.getSecond().getLastModifiedTime()));
response.setVersion( object_objectitem_pair.getSecond().getVersion());
SObjectItemDao itemDao = new SObjectItemDao();
SObjectItem item = itemDao.get( tupleObjectItem.getSecond().getId());
SObjectItem item = itemDao.get( object_objectitem_pair.getSecond().getId());
item.setMd5(md5Checksum);
item.setStoredSize(contentLength);
PersistContext.getSession().save(item);
@ -729,25 +728,25 @@ public class S3Engine {
SBucket bucket = bucketDao.getByName(bucketName);
if(bucket == null) throw new NoSuchObjectException("Bucket " + bucketName + " does not exist");
// -> is the caller allowed to write the object?
// -> the allocObjectItem checks for the bucket policy PutObject permissions
Tuple<SObject, SObjectItem> tupleObjectItem = allocObjectItem(bucket, key, meta, acl, null);
Tuple<SHost, String> tupleBucketHost = getBucketStorageHost(bucket);
// Is the caller allowed to write the object?
// The allocObjectItem checks for the bucket policy PutObject permissions
OrderedPair<SObject, SObjectItem> object_objectitem_pair = allocObjectItem(bucket, key, meta, acl, null);
OrderedPair<SHost, String> host_storagelocation_pair = getBucketStorageHost(bucket);
S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(tupleBucketHost.getFirst());
String itemFileName = tupleObjectItem.getSecond().getStoredPath();
S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
String itemFileName = object_objectitem_pair.getSecond().getStoredPath();
InputStream is = null;
try {
// explicit transaction control to avoid holding transaction during file-copy process
PersistContext.commitTransaction();
is = request.getInputStream();
String md5Checksum = bucketAdapter.saveObject(is, tupleBucketHost.getSecond(), bucket.getName(), itemFileName);
String md5Checksum = bucketAdapter.saveObject(is, host_storagelocation_pair.getSecond(), bucket.getName(), itemFileName);
response.setETag(md5Checksum);
response.setLastModified(DateHelper.toCalendar( tupleObjectItem.getSecond().getLastModifiedTime()));
response.setLastModified(DateHelper.toCalendar( object_objectitem_pair.getSecond().getLastModifiedTime()));
SObjectItemDao itemDao = new SObjectItemDao();
SObjectItem item = itemDao.get( tupleObjectItem.getSecond().getId());
SObjectItem item = itemDao.get( object_objectitem_pair.getSecond().getId());
item.setMd5(md5Checksum);
item.setStoredSize(contentLength);
PersistContext.getSession().save(item);
@ -1000,7 +999,7 @@ public class S3Engine {
{
int i = 0;
S3MetaDataEntry[] metaEntries = new S3MetaDataEntry[ itemMetaData.size() ];
ListIterator it = itemMetaData.listIterator();
ListIterator<SMeta> it = itemMetaData.listIterator();
while( it.hasNext()) {
SMeta oneTag = (SMeta)it.next();
S3MetaDataEntry oneEntry = new S3MetaDataEntry();
@ -1025,7 +1024,7 @@ public class S3Engine {
response.setVersion( item.getVersion());
if (request.isInlineData())
{
Tuple<SHost, String> tupleSHostInfo = getBucketStorageHost(sbucket);
OrderedPair<SHost, String> tupleSHostInfo = getBucketStorageHost(sbucket);
S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(tupleSHostInfo.getFirst());
if ( 0 <= bytesStart && 0 <= bytesEnd )
@ -1132,9 +1131,9 @@ public class S3Engine {
// -> delete the file holding the object
if (null != storedPath)
{
Tuple<SHost, String> tupleBucketHost = getBucketStorageHost( sbucket );
S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter( tupleBucketHost.getFirst());
bucketAdapter.deleteObject( tupleBucketHost.getSecond(), bucketName, storedPath );
OrderedPair<SHost, String> host_storagelocation_pair = getBucketStorageHost( sbucket );
S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter( host_storagelocation_pair.getFirst());
bucketAdapter.deleteObject( host_storagelocation_pair.getSecond(), bucketName, storedPath );
}
response.setResultCode(204);
@ -1147,7 +1146,7 @@ public class S3Engine {
List<SMeta> itemMetaData = metaDao.getByTarget( "SObjectItem", itemId );
if (null != itemMetaData)
{
ListIterator it = itemMetaData.listIterator();
ListIterator<SMeta> it = itemMetaData.listIterator();
while( it.hasNext()) {
SMeta oneTag = (SMeta)it.next();
metaDao.delete( oneTag );
@ -1160,7 +1159,7 @@ public class S3Engine {
List<SAcl> itemAclData = aclDao.listGrants( target, itemId );
if (null != itemAclData)
{
ListIterator it = itemAclData.listIterator();
ListIterator<SAcl> it = itemAclData.listIterator();
while( it.hasNext()) {
SAcl oneTag = (SAcl)it.next();
aclDao.delete( oneTag );
@ -1173,7 +1172,7 @@ public class S3Engine {
List<SAcl> bucketAclData = aclDao.listGrants( "SBucket", bucketId );
if (null != bucketAclData)
{
ListIterator it = bucketAclData.listIterator();
ListIterator<SAcl> it = bucketAclData.listIterator();
while( it.hasNext()) {
SAcl oneTag = (SAcl)it.next();
aclDao.delete( oneTag );
@ -1318,18 +1317,18 @@ public class S3Engine {
return entry;
}
public Tuple<SHost, String> getBucketStorageHost(SBucket bucket)
public OrderedPair<SHost, String> getBucketStorageHost(SBucket bucket)
{
MHostMountDao mountDao = new MHostMountDao();
SHost shost = bucket.getShost();
if(shost.getHostType() == SHost.STORAGE_HOST_TYPE_LOCAL) {
return new Tuple<SHost, String>(shost, shost.getExportRoot());
return new OrderedPair<SHost, String>(shost, shost.getExportRoot());
}
MHostMount mount = mountDao.getHostMount(ServiceProvider.getInstance().getManagementHostId(), shost.getId());
if(mount != null) {
return new Tuple<SHost, String>(shost, mount.getMountPath());
return new OrderedPair<SHost, String>(shost, mount.getMountPath());
}
// need to redirect request to other node
@ -1364,7 +1363,7 @@ public class S3Engine {
* @param overrideName
* @return
*/
private Tuple<SHost, String> allocBucketStorageHost(String bucketName, String overrideName)
private OrderedPair<SHost, String> allocBucketStorageHost(String bucketName, String overrideName)
{
MHostDao mhostDao = new MHostDao();
SHostDao shostDao = new SHostDao();
@ -1379,7 +1378,7 @@ public class S3Engine {
MHostMount mount = mounts[random.nextInt(mounts.length)];
S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(mount.getShost());
bucketAdapter.createContainer(mount.getMountPath(), (null != overrideName ? overrideName : bucketName));
return new Tuple<SHost, String>(mount.getShost(), mount.getMountPath());
return new OrderedPair<SHost, String>(mount.getShost(), mount.getMountPath());
}
// To make things simple, only allow one local mounted storage root
@ -1391,7 +1390,7 @@ public class S3Engine {
S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(localSHost);
bucketAdapter.createContainer(localSHost.getExportRoot(),(null != overrideName ? overrideName : bucketName));
return new Tuple<SHost, String>(localSHost, localStorageRoot);
return new OrderedPair<SHost, String>(localSHost, localStorageRoot);
}
throw new OutOfStorageException("No storage host is available");
@ -1415,7 +1414,7 @@ public class S3Engine {
* @throws IOException
*/
@SuppressWarnings("deprecation")
public Tuple<SObject, SObjectItem> allocObjectItem(SBucket bucket, String nameKey, S3MetaDataEntry[] meta, S3AccessControlList acl, String cannedAccessPolicy)
public OrderedPair<SObject, SObjectItem> allocObjectItem(SBucket bucket, String nameKey, S3MetaDataEntry[] meta, S3AccessControlList acl, String cannedAccessPolicy)
{
SObjectDao objectDao = new SObjectDao();
SObjectItemDao objectItemDao = new SObjectItemDao();
@ -1521,7 +1520,7 @@ public class S3Engine {
}
session.update(item);
return new Tuple<SObject, SObjectItem>(object, item);
return new OrderedPair<SObject, SObjectItem>(object, item);
}
@ -1623,7 +1622,11 @@ public class S3Engine {
{
S3BucketPolicy policy = null;
// -> on error of getting a policy ignore it
// Ordinarily a REST request will pass in an S3PolicyContext for a given bucket by this stage. The HttpServletRequest object
// should be held in the UserContext ready for extraction of the S3BucketPolicy.
// If there is an error in obtaining the request object or in loading the policy then log the failure and return an S3PolicyContext
// which indicates DEFAULT_DENY. Where there is no failure, the policy returned should be specific to the Canonical User ID of the requester.
try {
// -> in SOAP the HttpServletRequest object is hidden and not passed around
if (null != context) {
@ -1669,14 +1672,14 @@ public class S3Engine {
}
/**
* This function verifies that the accessing client has the requested
* permission on the object/bucket/Acl represented by the tuble: <target, targetId>
* This method verifies that the accessing client has the requested
* permission on the object/bucket/Acl represented by the tuple: <target, targetId>
*
* For cases where an ACL is meant for any authenticated user we place a "*" for the
* Canonical User Id ("*" is not a legal Cloud Stack Access key).
* Canonical User Id. N.B. - "*" is not a legal Cloud (Bridge) Access key.
*
* For cases where an ACL is meant for any anonymous user (or 'AllUsers') we place a "A" for the
* Canonical User Id ("A" is not a legal Cloud Stack Access key).
* Canonical User Id. N.B. - "A" is not a legal Cloud (Bridge) Access key.
*/
public static void accessAllowed( String target, long targetId, int requestedPermission )
{
@ -1684,25 +1687,25 @@ public class S3Engine {
SAclDao aclDao = new SAclDao();
// -> if an anonymous request, then canonicalUserId is an empty string
// If an anonymous request, then canonicalUserId is an empty string
String userId = UserContext.current().getCanonicalUserId();
if ( 0 == userId.length())
{
// -> is an anonymous principal ACL set for this <target, targetId>?
// Is an anonymous principal ACL set for this <target, targetId>?
if (hasPermission( aclDao.listGrants( target, targetId, "A" ), requestedPermission )) return;
}
else
{ // -> no priviledges means no access allowed
{
if (hasPermission( aclDao.listGrants( target, targetId, userId ), requestedPermission )) return;
// -> or maybe there is any principal authenticated ACL set for this <target, targetId>?
// Or alternatively is there is any principal authenticated ACL set for this <target, targetId>?
if (hasPermission( aclDao.listGrants( target, targetId, "*" ), requestedPermission )) return;
}
// No privileges implies that no access is allowed in the case of an anonymous user
throw new PermissionDeniedException( "Access Denied - ACLs do not give user the required permission" );
}
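The special canonical user IDs map directly onto the listGrants lookups used in accessAllowed above; illustratively (the "SBucket" target and bucketId value are placeholders):

SAclDao aclDao = new SAclDao();
// "A": grants addressed to anonymous / AllUsers principals
List<SAcl> anonymousGrants = aclDao.listGrants("SBucket", bucketId, "A");
// "*": grants addressed to any authenticated user
List<SAcl> authenticatedGrants = aclDao.listGrants("SBucket", bucketId, "*");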
/**
* This function assumes that the bucket has been tested to make sure it exists before
* This method assumes that the bucket has been tested to make sure it exists before
* it is called.
*
* @param context
@ -1712,7 +1715,7 @@ public class S3Engine {
public static S3BucketPolicy loadPolicy( S3PolicyContext context )
throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException, ParseException
{
Tuple<S3BucketPolicy,Integer> result = ServiceProvider.getInstance().getBucketPolicy( context.getBucketName());
OrderedPair<S3BucketPolicy,Integer> result = ServiceProvider.getInstance().getBucketPolicy( context.getBucketName());
S3BucketPolicy policy = result.getFirst();
if ( null == policy )
{
@ -1772,7 +1775,8 @@ public class S3Engine {
int fourth = Integer.parseInt( parts[3] );
throw new InvalidBucketName( bucketName + " is formatted as an IP address" );
}
catch( NumberFormatException e ) {}
catch( NumberFormatException e ) {
throw new InvalidBucketName( bucketName );
}
}
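Note that throwing InvalidBucketName from the catch block rejects any four-part name containing a non-numeric label (for example "my.backup.data.set"), not only names formatted as IP addresses. If the narrower behaviour is intended, a hedged alternative, assuming parts is the four-element result of splitting bucketName on ".":

    // Reject only when every label parses as an integer in 0..255,
    // i.e. the name really is formatted as an IPv4 address.
    boolean looksLikeIp = true;
    for (String part : parts) {
        try {
            int octet = Integer.parseInt( part );
            if (octet < 0 || octet > 255) { looksLikeIp = false; break; }
        }
        catch( NumberFormatException e ) { looksLikeIp = false; break; }
    }
    if (looksLikeIp)
        throw new InvalidBucketName( bucketName + " is formatted as an IP address" );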
@ -1804,12 +1808,12 @@ public class S3Engine {
}
}
private static boolean hasPermission( List<SAcl> priviledges, int requestedPermission )
private static boolean hasPermission( List<SAcl> privileges, int requestedPermission )
{
ListIterator it = priviledges.listIterator();
ListIterator<SAcl> it = privileges.listIterator();
while( it.hasNext())
{
// -> is the requested permission "contained" in one or the granted rights for this user
// True provided the requested permission is contained in one of the granted rights for this user; false otherwise.
SAcl rights = (SAcl)it.next();
int permission = rights.getPermission();
if (requestedPermission == (permission & requestedPermission)) return true;
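The containment test is a plain bitmask check. A worked example with assumed constants (READ = 0x1, WRITE = 0x2, READ_ACL = 0x4; the real SAcl values are not visible in this diff):

    int granted   = 0x1 | 0x4;    // grant carries READ and READ_ACL
    int requested = 0x1;          // caller asks for READ
    boolean canRead  = (requested == (granted & requested));   // true: READ is contained
    boolean canWrite = (0x2 == (granted & 0x2));               // false: WRITE was never granted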
@ -1818,13 +1822,13 @@ public class S3Engine {
}
/**
* ifRange is true and IfUnmodifiedSince or IfMatch fails then we return the entire object (indicated by
* If ifRange is true and the IfUnmodifiedSince or IfMatch precondition fails, then we return the entire object (indicated by
* returning -1 as the function result).
*
* @param ifCond - conditional get defined by these tests
* @param lastModified - value used on ifModifiedSince or ifUnmodifiedSince
* @param ETag - value used on ifMatch and ifNoneMatch
* @param ifRange - using an If-Range HTTP functionality
* @param ifRange - true when the HTTP If-Range functionality is in use
* @return -1 means return the entire object with an HTTP 200 (not a subrange)
*/
private int conditionPassed( S3ConditionalHeaders ifCond, Date lastModified, String ETag, boolean ifRange )
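A sketch of the If-Range fallback the javadoc above describes; the two test methods on S3ConditionalHeaders are assumed names, since their signatures are not visible in this diff:

    // If-Range semantics: when the precondition backing a Range request fails,
    // ignore the Range and serve the whole object with HTTP 200 (signalled by -1)
    // rather than answering with a 412/304 error status.
    if (ifRange) {
        boolean preconditionHolds = ifCond.ifUnmodifiedSince( lastModified )   // assumed helper
                                 && ifCond.ifMatch( ETag );                    // assumed helper
        if (!preconditionHolds) return -1;
    }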

View File

@ -22,7 +22,7 @@ import java.io.InputStream;
import javax.activation.DataHandler;
/**
* @author Kelven Yang
* @author Kelven Yang, John Zucker
*/
public class S3PutObjectInlineRequest extends S3Request {
protected String bucketName;
@ -30,7 +30,7 @@ public class S3PutObjectInlineRequest extends S3Request {
protected long contentLength;
protected S3MetaDataEntry[] metaEntries;
protected S3AccessControlList acl;
protected String cannedAccessPolicy; // -> REST only sets an acl with a simple keyword
protected String cannedAccessPolicy; // Canned ACLs are public-read, public-read-write, private, authenticated-read or log-delivery-write
protected DataHandler data;
protected String dataAsString;
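For reference, a canned keyword expands to a small fixed grant set on the server side. A hedged sketch of that expansion (the helper name is hypothetical; the "A" and "*" grantee markers follow the convention documented in S3Engine above, and log-delivery-write is omitted for brevity):

    // Hypothetical helper: expand a canned ACL keyword into <grantee, permission> pairs.
    // "A" = anonymous/AllUsers, "*" = any authenticated user; the owner always keeps FULL_CONTROL.
    static String[][] expandCannedAcl(String keyword, String ownerId) {
        if ("public-read".equals(keyword))
            return new String[][] {{ownerId, "FULL_CONTROL"}, {"A", "READ"}};
        if ("public-read-write".equals(keyword))
            return new String[][] {{ownerId, "FULL_CONTROL"}, {"A", "READ"}, {"A", "WRITE"}};
        if ("authenticated-read".equals(keyword))
            return new String[][] {{ownerId, "FULL_CONTROL"}, {"*", "READ"}};
        return new String[][] {{ownerId, "FULL_CONTROL"}};   // "private" and the default
    }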

View File

@ -1,325 +0,0 @@
/*
* Copyright (C) 2011 Citrix Systems, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloud.bridge.tool;
import java.io.File;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.Calendar;
import java.util.HashMap;
import java.util.Map;
import javax.activation.DataHandler;
import org.apache.axis2.AxisFault;
import org.apache.log4j.xml.DOMConfigurator;
import com.amazon.s3.client.AmazonS3Stub;
import com.amazon.s3.client.AmazonS3Stub.CreateBucket;
import com.amazon.s3.client.AmazonS3Stub.CreateBucketResponse;
import com.amazon.s3.client.AmazonS3Stub.DeleteBucket;
import com.amazon.s3.client.AmazonS3Stub.DeleteBucketResponse;
import com.amazon.s3.client.AmazonS3Stub.DeleteObject;
import com.amazon.s3.client.AmazonS3Stub.DeleteObjectResponse;
import com.amazon.s3.client.AmazonS3Stub.ListBucket;
import com.amazon.s3.client.AmazonS3Stub.ListBucketResponse;
import com.amazon.s3.client.AmazonS3Stub.ListBucketResult;
import com.amazon.s3.client.AmazonS3Stub.ListEntry;
import com.amazon.s3.client.AmazonS3Stub.PrefixEntry;
import com.amazon.s3.client.AmazonS3Stub.PutObjectInline;
import com.amazon.s3.client.AmazonS3Stub.PutObjectInlineResponse;
import com.amazon.s3.client.AmazonS3Stub.Status;
/**
* @author Kelven Yang
*/
public class CloudS3CmdTool {
private String serviceUrl;
private AmazonS3Stub serviceStub;
private static void configLog4j() {
URL configUrl = System.class.getResource("/conf/log4j-cloud-bridge.xml");
if(configUrl == null)
configUrl = ClassLoader.getSystemResource("log4j-cloud-bridge.xml");
if(configUrl == null)
configUrl = ClassLoader.getSystemResource("conf/log4j-cloud-bridge.xml");
if(configUrl != null) {
try {
System.out.println("Configure log4j using " + configUrl.toURI().toString());
} catch (URISyntaxException e1) {
e1.printStackTrace();
}
try {
File file = new File(configUrl.toURI());
System.out.println("Log4j configuration from : " + file.getAbsolutePath());
DOMConfigurator.configureAndWatch(file.getAbsolutePath(), 10000);
} catch (URISyntaxException e) {
System.out.println("Unable to convert log4j configuration Url to URI");
}
} else {
System.out.println("Configure log4j with default properties");
}
}
private static Map<String, String> getNamedParameters(String[] args) {
Map<String, String> params = new HashMap<String, String>();
for(int i = 1; i < args.length; i++) {
if(args[i].charAt(0) == '-') {
String[] tokens = args[i].substring(1).split("=");
if(tokens.length == 2) {
params.put(tokens[0], tokens[1]);
}
}
}
return params;
}
private static boolean validateNamedParameters(Map<String, String> params, String... keys) {
for(String key : keys) {
if(params.get(key) == null || params.get(key).isEmpty())
return false;
}
return true;
}
public static void main(String[] args) {
configLog4j();
(new CloudS3CmdTool()).run(args);
}
private void run(String[] args) {
Map<String, String> env = System.getenv();
for (String envName : env.keySet()) {
if(envName.equals("CLOUD_SERVICE_URL"))
serviceUrl = env.get(envName);
}
if(serviceUrl == null) {
System.out.println("Please set CLOUD_SERVICE_URL environment variable");
System.exit(0);
}
if(args.length < 1) {
System.out.println("Please specify a command to run");
System.exit(0);
}
try {
serviceStub = new AmazonS3Stub(serviceUrl);
} catch (AxisFault e) {
System.out.println("Unable to initialize service stub");
e.printStackTrace();
System.exit(0);
}
// command dispatch
if(args[0].equals("bucket-create")) {
createBucket(args);
} else if(args[0].equals("bucket-delete")) {
deleteBucket(args);
} else if(args[0].equals("bucket-list")) {
listBucket(args);
} else if(args[0].equals("object-put-inline")) {
putObjectInline(args);
} else if(args[0].equals("object-delete")) {
deleteObject(args);
}
}
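For the record, the removed tool was driven by a command name in args[0] plus -key=value options, with the endpoint taken from the environment; for example (the service URL shown is hypothetical):

    export CLOUD_SERVICE_URL=http://localhost:8080/bridge/services/AmazonS3
    java com.cloud.bridge.tool.CloudS3CmdTool bucket-list -prefix=logs/ -max=100 mybucket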
private void createBucket(String[] args) {
if(args.length < 2) {
System.out.println("Usage: bucket-create <bucket-name>");
System.exit(0);
}
try {
CreateBucket bucket = new CreateBucket();
bucket.setBucket(args[1]);
bucket.setAWSAccessKeyId( "TODO1" );
bucket.setSignature("TODO2");
bucket.setTimestamp(Calendar.getInstance());
CreateBucketResponse response = serviceStub.createBucket(bucket);
System.out.println("Bucket " + response.getCreateBucketReturn().getBucketName() + " has been created successfully");
} catch(Exception e) {
System.out.println("Failed to execute bucket-create due to " + e.getMessage());
}
}
private void deleteBucket(String[] args) {
if(args.length < 2) {
System.out.println("Usage: bucket-delete <bucket-name>");
System.exit(0);
}
try {
DeleteBucket request = new DeleteBucket();
request.setBucket(args[1]);
request.setSignature("TODO");
request.setTimestamp(Calendar.getInstance());
DeleteBucketResponse response = serviceStub.deleteBucket(request);
Status status = response.getDeleteBucketResponse();
if(status.getCode() == 200) {
System.out.println("Bucket " + args[1] + " has been deleted successfully");
} else {
System.out.println("Unable to delete bucket " + args[1] + " - " + status.getDescription());
}
} catch(Exception e) {
System.out.println("Failed to execute bucket-delete due to " + e.getMessage());
}
}
private void listBucket(String[] args) {
if(args.length < 2) {
System.out.println("Usage: bucket-list -prefix=<prefix> -delimiter=<delimiter> -marker=<Marker> -max=<max items to return> <bucket name>");
System.exit(0);
}
try {
ListBucket request = new ListBucket();
Map<String, String> params = getNamedParameters(args);
request.setBucket(args[args.length - 1]);
request.setCredential("TODO");
if(params.get("prefix") != null)
request.setPrefix(params.get("prefix"));
if(params.get("delimiter") != null)
request.setDelimiter(params.get("delimiter"));
if(params.get("marker") != null)
request.setMarker(params.get("marker"));
if(params.get("max") != null) {
try {
int maxKeys = Integer.parseInt(params.get("max"));
request.setMaxKeys(maxKeys);
} catch(Exception e){
System.out.println("-max parameter should be a numeric value");
}
}
request.setAWSAccessKeyId("TODO");
request.setCredential("TODO");
request.setSignature("TODO");
request.setTimestamp(Calendar.getInstance());
ListBucketResponse response = serviceStub.listBucket(request);
ListBucketResult result = response.getListBucketResponse();
System.out.println("\tContent of Bucket " + result.getName());
System.out.println("\tListing with prefix: " + result.getPrefix() + ", delimiter: "
+ result.getDelimiter() + ", marker: " + result.getMarker() + ", max: " + result.getMaxKeys());
ListEntry[] entries = result.getContents();
if(entries != null) {
for(int i = 0; i < entries.length; i++) {
ListEntry entry = entries[i];
System.out.print("\t");
System.out.print(entry.getSize());
System.out.print("\t");
System.out.print(entry.getKey());
System.out.print("\t");
System.out.print(entry.getETag());
System.out.print("\n");
}
}
PrefixEntry[] prefixEntries = result.getCommonPrefixes();
if(prefixEntries != null) {
System.out.print("\n\n");
for(int i = 0; i < prefixEntries.length; i++) {
System.out.print("\t<Prefix>\t");
System.out.print(prefixEntries[i].getPrefix());
System.out.print("\n");
}
}
} catch(Exception e) {
System.out.println("Failed to execute bucket-list due to " + e.getMessage());
}
}
private void putObjectInline(String[] args) {
if(args.length < 4) {
System.out.println("Usage: object-put-inline -bucket=<bucket name> -name=<object name> <path to the file>");
System.exit(0);
}
Map<String, String> params = getNamedParameters(args);
if(!validateNamedParameters(params, "bucket", "name")) {
System.out.println("Usage: object-put-inline -bucket=<bucket name> -name=<object name> <path to the file>");
System.exit(0);
}
File file = new File(args[args.length -1]);
if(!file.exists()) {
System.out.println("Unable to find file " + args[args.length -1]);
System.exit(0);
}
try {
PutObjectInline request = new PutObjectInline();
request.setBucket(params.get("bucket"));
request.setKey(params.get("name"));
request.setContentLength(file.length());
request.setAWSAccessKeyId("TODO");
request.setCredential("TODO");
request.setSignature("TODO");
request.setTimestamp(Calendar.getInstance());
request.setData(new DataHandler(file.toURL()));
PutObjectInlineResponse response = serviceStub.putObjectInline(request);
System.out.println("Object has been posted successfully. ETag: " + response.getPutObjectInlineResponse().getETag());
} catch(Exception e) {
System.out.println("Failed to execute object-put-inline due to " + e.getMessage());
}
}
private void deleteObject(String[] args) {
if(args.length < 3) {
System.out.println("Usage: object-delete -bucket=<bucket name> -name=<object name>");
System.exit(0);
}
Map<String, String> params = getNamedParameters(args);
if(!validateNamedParameters(params, "bucket", "name")) {
System.out.println("Usage: object-delete -bucket=<bucket name> -name=<object name>");
System.exit(0);
}
try {
DeleteObject request = new DeleteObject();
request.setAWSAccessKeyId("TODO");
request.setBucket(params.get("bucket"));
request.setKey(params.get("name"));
request.setSignature("TODO");
request.setCredential("TODO");
request.setTimestamp(Calendar.getInstance());
DeleteObjectResponse response = serviceStub.deleteObject(request);
if(response.getDeleteObjectResponse().getCode() == 200)
System.out.println("Object " + params.get("name") + " has been deleted successfully");
else
System.out.println("Object " + params.get("name") + " can not be deleted. Error: "
+ response.getDeleteObjectResponse().getCode());
} catch(Exception e) {
System.out.println("Failed to execute object-delete due to " + e.getMessage());
}
}
}

View File

@ -15,18 +15,19 @@
*/
package com.cloud.bridge.util;
/**
*
* @author Kelven Yang
*
/**
* @author Kelven Yang, John Zucker
* A reusable class whose instances encode an ordered pair (or 2-tuple) of values of types T1 and T2.
* Provides getters: getFirst() and getSecond().
* Provides setters: setFirst(val) and setSecond(val), each returning this so that calls can be chained.
* @param <T1>
* @param <T2>
*/
public class Tuple <T1, T2> {
public class OrderedPair <T1, T2> {
T1 first;
T2 second;
public Tuple(T1 t1, T2 t2) {
public OrderedPair(T1 t1, T2 t2) {
first = t1;
second = t2;
}
@ -35,7 +36,7 @@ public class Tuple <T1, T2> {
return first;
}
public Tuple<T1, T2> setFirst(T1 t1) {
public OrderedPair<T1, T2> setFirst(T1 t1) {
first = t1;
return this;
}
@ -44,7 +45,7 @@ public class Tuple <T1, T2> {
return second;
}
public Tuple<T1, T2> setSecond(T2 t2) {
public OrderedPair<T1, T2> setSecond(T2 t2) {
second = t2;
return this;
}
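Since both setters return this, updates can be chained. A small usage example:

    OrderedPair<String, Long> md5AndLength =
        new OrderedPair<String, Long>("d41d8cd98f00b204e9800998ecf8427e", 0L)
            .setSecond(1024L);                       // chained update of the second element
    String md5  = md5AndLength.getFirst();
    Long length = md5AndLength.getSecond();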

View File

@ -261,7 +261,6 @@ public class RestAuth {
* @throws UnsupportedEncodingException
*/
// TODO - Hi Pri - Make this validate correctly with StringToSign for verb POST and other cases
public boolean verifySignature( String httpVerb, String secretKey, String signature )
throws SignatureException, UnsupportedEncodingException {