Core S3 classes up to date

This commit is contained in:
JohnZ 2012-05-03 15:06:30 +01:00
parent c9792857c3
commit 4b0d7ccb31
27 changed files with 3185 additions and 5647 deletions

1
.gitignore vendored
View File

@ -1,6 +1,7 @@
build/replace.properties
build/build.number
bin/
*.000
*.pem
*.patch
*.xml000

View File

@ -1,212 +1,213 @@
/*
* Copyright (C) 2011 Citrix Systems, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloud.bridge.auth.s3;
import java.sql.SQLException;
import javax.servlet.http.HttpServletRequest;
import org.apache.axiom.soap.SOAPEnvelope;
import org.apache.axiom.soap.SOAPBody;
import org.apache.log4j.Logger;
import org.apache.axis2.context.MessageContext;
import org.apache.axis2.engine.Handler;
import org.apache.axis2.AxisFault;
import org.apache.axis2.description.HandlerDescription;
import org.apache.axis2.description.Parameter;
import com.cloud.bridge.model.UserCredentials;
import com.cloud.bridge.persist.dao.UserCredentialsDao;
import com.cloud.bridge.service.UserContext;
import com.cloud.bridge.util.S3SoapAuth;
/*
* For SOAP compatibility.
*/
public class AuthenticationHandler implements Handler {
protected final static Logger logger = Logger.getLogger(AuthenticationHandler.class);
protected HandlerDescription handlerDesc = new HandlerDescription( "default handler" );
private String name = "S3AuthenticationHandler";
public void init( HandlerDescription handlerdesc )
{
this.handlerDesc = handlerdesc;
}
public String getName()
{
//logger.debug( "getName entry S3AuthenticationHandler" + name );
return name;
}
public String toString()
{
return (name != null) ? name.toString() : null;
}
public HandlerDescription getHandlerDesc()
{
return handlerDesc;
}
public Parameter getParameter( String name )
{
return handlerDesc.getParameter( name );
}
/**
* Verify the request's authentication signature by extracting all the
* necessary parts of the request, obtaining the requestor's secret key, and
* recalculating the signature.
*
* On Signature mismatch raise an AxisFault (i.e., a SoapFault) with what Amazon S3
* defines as a "Client.SignatureMismatch" error.
*
* Special case: need to deal with anonymous requests where no AWSAccessKeyId is
* given. In this case just pass the request on.
*/
public InvocationResponse invoke(MessageContext msgContext) throws AxisFault
{
String accessKey = null;
String operation = null;
String msgSig = null;
String timestamp = null;
String secretKey = null;
String temp = null;
// [A] Obtain the HttpServletRequest object
HttpServletRequest httpObj =(HttpServletRequest)msgContext.getProperty("transport.http.servletRequest");
if (null != httpObj) System.out.println("S3 SOAP auth test header access - acceptable Encoding type: "+ httpObj.getHeader("Accept-Encoding"));
// [A] Try to recalculate the signature for non-anonymous requests
try
{ SOAPEnvelope soapEnvelope = msgContext.getEnvelope();
SOAPBody soapBody = soapEnvelope.getBody();
String xmlBody = soapBody.toString();
//logger.debug( "xmlrequest: " + xmlBody );
// -> did we get here yet its an EC2 request?
int offset = xmlBody.indexOf( "http://ec2.amazonaws.com" );
if (-1 != offset) return InvocationResponse.CONTINUE;
// -> if it is anonymous request, then no access key should exist
int start = xmlBody.indexOf( "AWSAccessKeyId>" );
if (-1 == start) {
UserContext.current().initContext();
return InvocationResponse.CONTINUE;
}
temp = xmlBody.substring( start+15 );
int end = temp.indexOf( "</" );
accessKey = temp.substring( 0, end );
//logger.debug( "accesskey " + accessKey );
// -> what if we cannot find the user's key?
if (null != (secretKey = lookupSecretKey( accessKey )))
{
// -> if any other field is missing, then the signature will not match
if ( null != (operation = soapBody.getFirstElementLocalName()))
operation = operation.trim();
else operation = "";
//logger.debug( "operation " + operation );
start = xmlBody.indexOf( "Timestamp>" );
if ( -1 < start )
{
temp = xmlBody.substring( start+10 );
end = temp.indexOf( "</" );
timestamp = temp.substring( 0, end );
//logger.debug( "timestamp " + timestamp );
}
else timestamp = "";
start = xmlBody.indexOf( "Signature>" );
if ( -1 < start )
{
temp = xmlBody.substring( start+10 );
end = temp.indexOf( "</" );
msgSig = temp.substring( 0, end );
//logger.debug( "signature " + msgSig );
}
else msgSig = "";
}
}
catch( Exception e )
{
logger.error("Signature calculation failed due to: ", e);
throw new AxisFault( e.toString(), "Server.InternalError" );
}
// [B] Verify that the given signature matches what we calculated here
if (null == secretKey)
{
logger.error( "Unknown AWSAccessKeyId: [" + accessKey + "]" );
throw new AxisFault( "Unknown AWSAccessKeyId: [" + accessKey + "]", "Client.InvalidAccessKeyId" );
}
// -> for SOAP requests the Cloud API keys are sent here and only here
S3SoapAuth.verifySignature( msgSig, operation, timestamp, accessKey, secretKey );
UserContext.current().initContext( accessKey, secretKey, accessKey, "S3 SOAP request", httpObj );
return InvocationResponse.CONTINUE;
}
public void revoke(MessageContext msgContext)
{
logger.info(msgContext.getEnvelope().toString());
}
public void setName(String name)
{
//logger.debug( "setName entry S3AuthenticationHandler " + name );
this.name = name;
}
/**
* Given the user's access key, then obtain his secret key in the user database.
*
* @param accessKey - a unique string allocated for each registered user
* @return the secret key or null of no matching user found
*/
private String lookupSecretKey( String accessKey )
throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException
{
UserCredentialsDao credentialDao = new UserCredentialsDao();
UserCredentials cloudKeys = credentialDao.getByAccessKey( accessKey );
if ( null == cloudKeys ) {
logger.debug( accessKey + " is not defined in the S3 service - call SetUserKeys" );
return null;
}
else return cloudKeys.getSecretKey();
}
@Override
public void cleanup()
{
//logger.debug( "cleanup entry S3AuthenticationHandler " );
}
@Override
public void flowComplete( MessageContext arg0 )
{
//logger.debug( "flowComplete entry S3AuthenticationHandler " );
}
/*
* Copyright (C) 2011 Citrix Systems, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloud.bridge.auth.s3;
import java.sql.SQLException;
import javax.servlet.http.HttpServletRequest;
import org.apache.axiom.soap.SOAPEnvelope;
import org.apache.axiom.soap.SOAPBody;
import org.apache.log4j.Logger;
import org.apache.axis2.context.MessageContext;
import org.apache.axis2.engine.Handler;
import org.apache.axis2.AxisFault;
import org.apache.axis2.description.HandlerDescription;
import org.apache.axis2.description.Parameter;
import com.cloud.bridge.model.UserCredentials;
import com.cloud.bridge.persist.dao.UserCredentialsDao;
import com.cloud.bridge.service.UserContext;
import com.cloud.bridge.util.S3SoapAuth;
/*
* For SOAP compatibility.
*/
public class AuthenticationHandler implements Handler {
    protected final static Logger logger = Logger.getLogger(AuthenticationHandler.class);

    protected HandlerDescription handlerDesc = new HandlerDescription( "default handler" );
    private String name = "S3AuthenticationHandler";

    public void init( HandlerDescription handlerdesc )
    {
        this.handlerDesc = handlerdesc;
    }

    public String getName()
    {
        return name;
    }

    public String toString()
    {
        return (name != null) ? name.toString() : null;
    }

    public HandlerDescription getHandlerDesc()
    {
        return handlerDesc;
    }

    public Parameter getParameter( String name )
    {
        return handlerDesc.getParameter( name );
    }

    /**
     * Verify the request's authentication signature by extracting all the
     * necessary parts of the request, obtaining the requestor's secret key, and
     * recalculating the signature (delegated to S3SoapAuth.verifySignature).
     *
     * Special cases: EC2 requests are passed straight through, and anonymous
     * requests (no AWSAccessKeyId element in the body) continue with an
     * uninitialized user context.
     *
     * @param msgContext the Axis2 message context carrying the SOAP envelope
     * @return InvocationResponse.CONTINUE when the request may proceed
     * @throws AxisFault "Client.InvalidAccessKeyId" when the access key is unknown,
     *         "Server.InternalError" when the body cannot be parsed
     */
    public InvocationResponse invoke(MessageContext msgContext) throws AxisFault
    {
        String accessKey = null;
        String operation = null;
        String msgSig    = null;
        String timestamp = null;
        String secretKey = null;

        // [A] Obtain the HttpServletRequest object (may be absent for non-HTTP transports)
        HttpServletRequest httpObj =
            (HttpServletRequest)msgContext.getProperty("transport.http.servletRequest");
        if (null != httpObj)
            // was System.out.println - debug output belongs on the logger, not stdout
            logger.debug("S3 SOAP auth test header access - acceptable Encoding type: " + httpObj.getHeader("Accept-Encoding"));

        // [B] Try to recalculate the signature for non-anonymous requests
        try
        {
            SOAPEnvelope soapEnvelope = msgContext.getEnvelope();
            SOAPBody soapBody = soapEnvelope.getBody();
            String xmlBody = soapBody.toString();

            // -> did we get here yet its an EC2 request?
            if (-1 != xmlBody.indexOf( "http://ec2.amazonaws.com" ))
                return InvocationResponse.CONTINUE;

            // -> if it is an anonymous request, then no access key element exists
            accessKey = extractElementText( xmlBody, "AWSAccessKeyId>" );
            if (null == accessKey) {
                UserContext.current().initContext();
                return InvocationResponse.CONTINUE;
            }

            // -> what if we cannot find the user's key?
            if (null != (secretKey = lookupSecretKey( accessKey )))
            {
                // -> if any other field is missing, then the signature will not match
                operation = soapBody.getFirstElementLocalName();
                operation = (null != operation) ? operation.trim() : "";

                timestamp = extractElementText( xmlBody, "Timestamp>" );
                if (null == timestamp) timestamp = "";

                msgSig = extractElementText( xmlBody, "Signature>" );
                if (null == msgSig) msgSig = "";
            }
        }
        catch( Exception e )
        {
            logger.error("Signature calculation failed due to: ", e);
            throw new AxisFault( e.toString(), "Server.InternalError" );
        }

        // [C] Verify that the given signature matches what we calculated here
        if (null == secretKey)
        {
            logger.error( "Unknown AWSAccessKeyId: [" + accessKey + "]" );
            throw new AxisFault( "Unknown AWSAccessKeyId: [" + accessKey + "]", "Client.InvalidAccessKeyId" );
        }

        // -> for SOAP requests the Cloud API keys are sent here and only here
        S3SoapAuth.verifySignature( msgSig, operation, timestamp, accessKey, secretKey );
        UserContext.current().initContext( accessKey, secretKey, accessKey, "S3 SOAP request", httpObj );
        return InvocationResponse.CONTINUE;
    }

    /**
     * Textually extract the character data following the first occurrence of
     * openTag, up to the next closing-tag marker ("&lt;/"). Replaces the three
     * hand-rolled substring extractions that each hard-coded the tag length.
     *
     * @param xmlBody the serialized SOAP body
     * @param openTag tail of the opening tag to search for, e.g. "Timestamp>"
     * @return the element text, or null when the tag is not present; a body with
     *         no closing tag raises an exception the caller maps to Server.InternalError
     */
    private static String extractElementText( String xmlBody, String openTag )
    {
        int start = xmlBody.indexOf( openTag );
        if (-1 == start) return null;
        String tail = xmlBody.substring( start + openTag.length() );
        return tail.substring( 0, tail.indexOf( "</" ) );
    }

    public void revoke(MessageContext msgContext)
    {
        logger.info(msgContext.getEnvelope().toString());
    }

    public void setName(String name)
    {
        this.name = name;
    }

    /**
     * Given the user's access key, obtain his secret key from the user database.
     *
     * @param accessKey - a unique string allocated for each registered user
     * @return the secret key or null if no matching user found
     */
    private String lookupSecretKey( String accessKey )
        throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException
    {
        UserCredentialsDao credentialDao = new UserCredentialsDao();
        UserCredentials cloudKeys = credentialDao.getByAccessKey( accessKey );
        if ( null == cloudKeys ) {
            logger.debug( accessKey + " is not defined in the S3 service - call SetUserKeys" );
            return null;
        }
        else return cloudKeys.getSecretKey();
    }

    @Override
    public void cleanup()
    {
        // nothing to release
    }

    @Override
    public void flowComplete( MessageContext arg0 )
    {
        // no per-flow state to tear down
    }
}

View File

@ -18,8 +18,8 @@ package com.cloud.bridge.lifecycle;
import org.apache.axis2.context.ConfigurationContext;
import org.apache.axis2.description.AxisService;
import org.apache.axis2.engine.ServiceLifeCycle;
import com.cloud.bridge.service.controller.s3.ServiceProvider;
import com.cloud.bridge.service.ServiceProvider;
/**
* @author Kelven Yang

View File

@ -20,11 +20,7 @@ import java.util.HashSet;
import java.util.Set;
/**
* @author Kelven Yang, John Zucker
*/
public class SHost implements Serializable {
private static final long serialVersionUID = 213346565810468018L;

View File

@ -29,19 +29,34 @@ import com.cloud.bridge.util.QueryHelper;
* response to queryEntities for a particular instantiation of the EntityDao generic class, as defined here.
* Any instantiation of EntityDao passes in the class for which it is instantiating. For example a new instance of SBucketDao
* passes in com.cloud.bridge.model.SBucket as its clazz.
* Instantiators, providing an Entity definition, are the classes
* MHostDao,
* MHostMountDao,
* SAclDao,
* SBucketDao,
* SHostDao,
* SMetaDao,
* SObjectDao,
* SObjectItemDao,
* CloudStackSvcOfferingDao
*/
public class EntityDao<T> {
private Class<?> clazz;
private boolean isCloudStackSession = false;
// Constructor to implement CloudStackSvcOffering: see class CloudStackSvcOfferingDao
public EntityDao(Class<?> clazz, boolean isCloudStackSession) {
this.clazz = clazz;
this.isCloudStackSession = isCloudStackSession;
// Note : beginTransaction can be called multiple times
// "If a new underlying transaction is required, begin the transaction. Otherwise continue the new work in the
// context of the existing underlying transaction." from the Hibernate spec
PersistContext.beginTransaction(isCloudStackSession);
}
// Standard constructor to implement MHostDao, MHostMountDao, SAclDao, SBucketDao, SHostDao, SMetaDao, SObjectDao, SObjectItemDao
public EntityDao(Class<?> clazz) {
this.clazz = clazz;

View File

@ -96,6 +96,7 @@ import com.cloud.bridge.model.UserCredentials;
import com.cloud.bridge.persist.PersistContext;
import com.cloud.bridge.persist.dao.OfferingDao;
import com.cloud.bridge.persist.dao.UserCredentialsDao;
import com.cloud.bridge.service.controller.s3.ServiceProvider;
import com.cloud.bridge.service.core.ec2.EC2AssociateAddress;
import com.cloud.bridge.service.core.ec2.EC2AuthorizeRevokeSecurityGroup;
import com.cloud.bridge.service.core.ec2.EC2CreateImage;

View File

@ -18,6 +18,7 @@ package com.cloud.bridge.service;
import org.apache.log4j.Logger;
import com.amazon.ec2.*;
import com.cloud.bridge.service.controller.s3.ServiceProvider;
public class EC2SoapService implements AmazonEC2SkeletonInterface {
protected final static Logger logger = Logger.getLogger(EC2SoapService.class);

View File

@ -1,42 +0,0 @@
/*
* Copyright (C) 2011 Citrix Systems, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloud.bridge.service;
import java.io.IOException;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.callback.UnsupportedCallbackException;
import org.apache.ws.security.WSPasswordCallback;
public class PWCBHandler implements CallbackHandler {
    /**
     * Supply the shared WS-Security password for both the "client" and the
     * "service" identities; any other identifier is left without a password.
     */
    @SuppressWarnings("deprecation")
    public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
        for (Callback callback : callbacks) {
            WSPasswordCallback pwcb = (WSPasswordCallback) callback;
            // NOTE(review): the misspelled accessor is the actual WSS4J 1.5 API name
            String id = pwcb.getIdentifer();
            if ("client".equals(id) || "service".equals(id)) {
                pwcb.setPassword("apache");
            }
        }
    }
}

View File

@ -1,38 +0,0 @@
/*
* Copyright (C) 2011 Citrix Systems, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloud.bridge.service;
import java.io.InputStream;
import java.io.OutputStream;
import javax.activation.DataHandler;
import com.cloud.bridge.service.core.s3.S3MultipartPart;
import com.cloud.bridge.util.Tuple;
/**
* @author Kelven Yang
*/
public interface S3BucketAdapter {
/** Create the physical container (e.g. a directory) backing the named bucket under mountedRoot. */
void createContainer(String mountedRoot, String bucket);
/** Remove the container backing the named bucket, including its contents. */
void deleteContainer(String mountedRoot, String bucket);
/** @return the folder path used to hold the bucket's objects under mountedRoot */
String getBucketFolderDir(String mountedRoot, String bucket);
/** Persist the stream as the named object; the file-system adapter returns the hex MD5 of the bytes written. */
String saveObject(InputStream is, String mountedRoot, String bucket, String fileName);
/** @return a DataHandler over the whole stored object */
DataHandler loadObject(String mountedRoot, String bucket, String fileName);
/** @return a DataHandler over the byte range [startPos, endPos] of the stored object */
DataHandler loadObjectRange(String mountedRoot, String bucket, String fileName, long startPos, long endPos);
/** Delete the named object from the bucket's container. */
void deleteObject(String mountedRoot, String bucket, String fileName);
/** Concatenate multipart-upload parts into one object; returns (MD5 hex, total length).
 * Name is misspelled ("concatentate") but must be kept for interface compatibility. */
Tuple<String, Long> concatentateObjects(String mountedRoot, String destBucket, String fileName, String sourceBucket, S3MultipartPart[] parts, OutputStream os);
}

View File

@ -1,245 +0,0 @@
/*
* Copyright (C) 2011 Citrix Systems, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloud.bridge.service;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.MalformedURLException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import javax.activation.DataHandler;
import javax.activation.DataSource;
import org.apache.log4j.Logger;
import com.cloud.bridge.service.core.s3.S3MultipartPart;
import com.cloud.bridge.service.exception.FileNotExistException;
import com.cloud.bridge.service.exception.InternalErrorException;
import com.cloud.bridge.service.exception.OutOfStorageException;
import com.cloud.bridge.util.FileRangeDataSource;
import com.cloud.bridge.util.StringHelper;
import com.cloud.bridge.util.Tuple;
/**
* @author Kelven Yang
*/
public class S3FileSystemBucketAdapter implements S3BucketAdapter {
    protected final static Logger logger = Logger.getLogger(S3FileSystemBucketAdapter.class);

    public S3FileSystemBucketAdapter() {
    }

    /**
     * Create the directory backing the bucket, including any missing parents.
     * @throws OutOfStorageException if the directory cannot be created
     */
    @Override
    public void createContainer(String mountedRoot, String bucket) {
        String dir = getBucketFolderDir(mountedRoot, bucket);
        File container = new File(dir);
        if (!container.exists()) {
            if (!container.mkdirs())
                throw new OutOfStorageException("Unable to create " + dir + " for bucket " + bucket);
        }
    }

    /**
     * Recursively delete the directory backing the bucket.
     * @throws OutOfStorageException if the directory tree cannot be removed
     */
    @Override
    public void deleteContainer(String mountedRoot, String bucket) {
        String dir = getBucketFolderDir(mountedRoot, bucket);
        File path = new File(dir);
        if (!deleteDirectory(path))
            throw new OutOfStorageException("Unable to delete " + dir + " for bucket " + bucket);
    }

    /** @return mountedRoot + separator + sanitized bucket folder name */
    @Override
    public String getBucketFolderDir(String mountedRoot, String bucket) {
        String bucketFolder = getBucketFolderName(bucket);
        String separator = "" + File.separatorChar;
        if (!mountedRoot.endsWith(separator))
            return mountedRoot + separator + bucketFolder;
        return mountedRoot + bucketFolder;
    }

    /**
     * Write the stream to the bucket's folder, replacing any existing file
     * (versioning off means the contents are rewritten).
     *
     * @return the hex MD5 digest of the bytes written
     * @throws OutOfStorageException wrapping any IOException
     */
    @Override
    public String saveObject(InputStream is, String mountedRoot, String bucket, String fileName)
    {
        FileOutputStream fos = null;
        MessageDigest md5 = null;
        try {
            md5 = MessageDigest.getInstance("MD5");
        } catch (NoSuchAlgorithmException e) {
            logger.error("Unexpected exception " + e.getMessage(), e);
            throw new InternalErrorException("Unable to get MD5 MessageDigest", e);
        }

        File file = new File(getBucketFolderDir(mountedRoot, bucket) + File.separatorChar + fileName);
        try {
            // -> when versioning is off we need to rewrite the file contents
            file.delete();
            file.createNewFile();
            fos = new FileOutputStream(file);
            byte[] buffer = new byte[4096];
            int len = 0;
            while ((len = is.read(buffer)) > 0) {
                fos.write(buffer, 0, len);
                md5.update(buffer, 0, len);
            }
            return StringHelper.toHexString(md5.digest());
        }
        catch (IOException e) {
            logger.error("Unexpected exception " + e.getMessage(), e);
            throw new OutOfStorageException(e);
        }
        finally {
            try {
                if (null != fos) fos.close();
            }
            catch (Exception e) {
                logger.error("Can't close FileOutputStream " + e.getMessage(), e);
            }
        }
    }

    /**
     * From a list of files (each being one part of the multipart upload), concatenate all files
     * into a single object that can be accessed by normal S3 calls. This function could take a
     * long time since a multipart upload is allowed to have up to 10,000 parts (each 5 GiB long).
     * Amazon defines that while this operation is in progress whitespace is sent back to the
     * client in order to keep the HTTP connection alive.
     *
     * (The method name misspelling is fixed in neither the interface nor here, to keep the
     * S3BucketAdapter contract intact.)
     *
     * @param mountedRoot - where both the source and dest buckets are located
     * @param destBucket - resulting location of the concatenated objects
     * @param fileName - resulting file name of the concatenated objects
     * @param sourceBucket - special bucket used to save uploaded file parts
     * @param parts - an array of file names in the sourceBucket
     * @param client - if not null, then keep the servlet connection alive while this potentially long concatenation takes place
     * @return Tuple with the first value the MD5 of the final object, and the second value the length of the final object
     */
    @Override
    public Tuple<String, Long> concatentateObjects(String mountedRoot, String destBucket, String fileName, String sourceBucket, S3MultipartPart[] parts, OutputStream client)
    {
        MessageDigest md5;
        long totalLength = 0;
        try {
            md5 = MessageDigest.getInstance("MD5");
        } catch (NoSuchAlgorithmException e) {
            logger.error("Unexpected exception " + e.getMessage(), e);
            throw new InternalErrorException("Unable to get MD5 MessageDigest", e);
        }

        File file = new File(getBucketFolderDir(mountedRoot, destBucket) + File.separatorChar + fileName);
        FileOutputStream fos = null;   // closed in finally - the original leaked it on an exception
        try {
            // -> when versioning is off we need to rewrite the file contents
            file.delete();
            file.createNewFile();
            fos = new FileOutputStream(file);
            byte[] buffer = new byte[4096];

            // -> get the input stream for the next file part
            for (int i = 0; i < parts.length; i++)
            {
                DataHandler nextPart = loadObject(mountedRoot, sourceBucket, parts[i].getPath());
                InputStream is = nextPart.getInputStream();
                int len = 0;
                while ((len = is.read(buffer)) > 0) {
                    fos.write(buffer, 0, len);
                    md5.update(buffer, 0, len);
                    totalLength += len;
                }
                is.close();

                // -> after each file write tell the client we are still here to keep connection alive
                if (null != client) {
                    client.write(new String(" ").getBytes());
                    client.flush();
                }
            }
            return new Tuple<String, Long>(StringHelper.toHexString(md5.digest()), Long.valueOf(totalLength));
        }
        catch (IOException e) {
            logger.error("concatentateObjects unexpected exception " + e.getMessage(), e);
            throw new OutOfStorageException(e);
        }
        finally {
            try {
                if (null != fos) fos.close();
            }
            catch (Exception e) {
                logger.error("Can't close FileOutputStream " + e.getMessage(), e);
            }
        }
    }

    /** @return a DataHandler over the whole stored object */
    @Override
    public DataHandler loadObject(String mountedRoot, String bucket, String fileName) {
        File file = new File(getBucketFolderDir(mountedRoot, bucket) + File.separatorChar + fileName);
        try {
            // file.toURL() is deprecated (does not escape illegal URL characters); go via URI
            return new DataHandler(file.toURI().toURL());
        } catch (MalformedURLException e) {
            throw new FileNotExistException("Unable to open underlying object file");
        }
    }

    /**
     * Delete the named object's backing file.
     * @throws OutOfStorageException if the file cannot be deleted
     */
    @Override
    public void deleteObject(String mountedRoot, String bucket, String fileName) {
        String filePath = getBucketFolderDir(mountedRoot, bucket) + File.separatorChar + fileName;
        File file = new File(filePath);
        if (!file.delete()) {
            logger.error("file: " + filePath + ", f=" + file.isFile() + ", h=" + file.isHidden() + ", e=" + file.exists() + ", w=" + file.canWrite());
            throw new OutOfStorageException("Unable to delete " + filePath + " for object deletion");
        }
    }

    /** @return a DataHandler over the byte range [startPos, endPos] of the stored object */
    @Override
    public DataHandler loadObjectRange(String mountedRoot, String bucket, String fileName, long startPos, long endPos) {
        File file = new File(getBucketFolderDir(mountedRoot, bucket) + File.separatorChar + fileName);
        try {
            DataSource ds = new FileRangeDataSource(file, startPos, endPos);
            return new DataHandler(ds);
        } catch (MalformedURLException e) {
            throw new FileNotExistException("Unable to open underlying object file");
        } catch (IOException e) {
            throw new FileNotExistException("Unable to open underlying object file");
        }
    }

    /**
     * Recursively delete a directory tree.
     * @return true when the final path.delete() succeeds
     */
    public static boolean deleteDirectory(File path) {
        if (path.exists()) {
            File[] files = path.listFiles();
            for (int i = 0; i < files.length; i++) {
                if (files[i].isDirectory()) {
                    deleteDirectory(files[i]);
                } else {
                    files[i].delete();
                }
            }
        }
        return path.delete();
    }

    /**
     * Sanitize a bucket name into a file-system-safe folder name.
     *
     * BUG FIX: the original re-read "bucket" on every replace, so the space and
     * backslash substitutions were discarded and only '/'->'-' took effect.
     * The replacements now chain so all three apply.
     */
    private String getBucketFolderName(String bucket) {
        // temporary
        return bucket.replace(' ', '_').replace('\\', '-').replace('/', '-');
    }
}

View File

@ -1,115 +0,0 @@
/*
* Copyright (C) 2011 Citrix Systems, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloud.bridge.service;
import org.apache.axis2.AxisFault;
import org.apache.log4j.Logger;
import com.amazon.s3.*;
/**
* @author Kelven Yang
*/
public class S3SoapService implements AmazonS3SkeletonInterface {
    protected final static Logger logger = Logger.getLogger(S3SoapService.class);

    /**
     * Every SOAP operation delegates to the S3 engine registered with the
     * service provider; look it up once per call through this helper.
     */
    private static AmazonS3SkeletonInterface s3Engine() {
        return ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class);
    }

    public GetBucketLoggingStatusResponse getBucketLoggingStatus(GetBucketLoggingStatus getBucketLoggingStatus0) {
        return s3Engine().getBucketLoggingStatus(getBucketLoggingStatus0);
    }

    public CopyObjectResponse copyObject(com.amazon.s3.CopyObject copyObject2) throws AxisFault {
        return s3Engine().copyObject(copyObject2);
    }

    public GetBucketAccessControlPolicyResponse getBucketAccessControlPolicy(
            GetBucketAccessControlPolicy getBucketAccessControlPolicy4) {
        return s3Engine().getBucketAccessControlPolicy(getBucketAccessControlPolicy4);
    }

    public ListBucketResponse listBucket(ListBucket listBucket6) {
        return s3Engine().listBucket(listBucket6);
    }

    public PutObjectResponse putObject(PutObject putObject8) {
        return s3Engine().putObject(putObject8);
    }

    public CreateBucketResponse createBucket(CreateBucket createBucket) {
        return s3Engine().createBucket(createBucket);
    }

    public ListAllMyBucketsResponse listAllMyBuckets(
            ListAllMyBuckets listAllMyBuckets12) {
        return s3Engine().listAllMyBuckets(listAllMyBuckets12);
    }

    public GetObjectResponse getObject(com.amazon.s3.GetObject getObject14) {
        return s3Engine().getObject(getObject14);
    }

    public DeleteBucketResponse deleteBucket(DeleteBucket deleteBucket16) {
        return s3Engine().deleteBucket(deleteBucket16);
    }

    public SetBucketLoggingStatusResponse setBucketLoggingStatus(
            SetBucketLoggingStatus setBucketLoggingStatus18) {
        return s3Engine().setBucketLoggingStatus(setBucketLoggingStatus18);
    }

    public GetObjectAccessControlPolicyResponse getObjectAccessControlPolicy(
            GetObjectAccessControlPolicy getObjectAccessControlPolicy20) {
        return s3Engine().getObjectAccessControlPolicy(getObjectAccessControlPolicy20);
    }

    public DeleteObjectResponse deleteObject(DeleteObject deleteObject22) {
        return s3Engine().deleteObject(deleteObject22);
    }

    public SetBucketAccessControlPolicyResponse setBucketAccessControlPolicy(
            SetBucketAccessControlPolicy setBucketAccessControlPolicy24) {
        return s3Engine().setBucketAccessControlPolicy(setBucketAccessControlPolicy24);
    }

    public SetObjectAccessControlPolicyResponse setObjectAccessControlPolicy(
            SetObjectAccessControlPolicy setObjectAccessControlPolicy26) {
        return s3Engine().setObjectAccessControlPolicy(setObjectAccessControlPolicy26);
    }

    public PutObjectInlineResponse putObjectInline(PutObjectInline putObjectInline28) {
        return s3Engine().putObjectInline(putObjectInline28);
    }

    public GetObjectExtendedResponse getObjectExtended(GetObjectExtended getObjectExtended30) {
        return s3Engine().getObjectExtended(getObjectExtended30);
    }
}

View File

@ -1,700 +0,0 @@
/*
* Copyright (C) 2011 Citrix Systems, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloud.bridge.service;
import java.io.IOException;
import java.util.Calendar;
import org.apache.axis2.AxisFault;
import org.apache.log4j.Logger;
import com.amazon.s3.AccessControlList;
import com.amazon.s3.AccessControlPolicy;
import com.amazon.s3.AmazonS3SkeletonInterface;
import com.amazon.s3.CanonicalUser;
import com.amazon.s3.CopyObject;
import com.amazon.s3.CopyObjectResult;
import com.amazon.s3.Group;
import com.amazon.s3.CopyObjectResponse;
import com.amazon.s3.CreateBucket;
import com.amazon.s3.CreateBucketResponse;
import com.amazon.s3.CreateBucketResult;
import com.amazon.s3.DeleteBucket;
import com.amazon.s3.DeleteBucketResponse;
import com.amazon.s3.DeleteObject;
import com.amazon.s3.DeleteObjectResponse;
import com.amazon.s3.GetBucketAccessControlPolicy;
import com.amazon.s3.GetBucketAccessControlPolicyResponse;
import com.amazon.s3.GetBucketLoggingStatus;
import com.amazon.s3.GetBucketLoggingStatusResponse;
import com.amazon.s3.GetObject;
import com.amazon.s3.GetObjectAccessControlPolicy;
import com.amazon.s3.GetObjectAccessControlPolicyResponse;
import com.amazon.s3.GetObjectExtended;
import com.amazon.s3.GetObjectExtendedResponse;
import com.amazon.s3.GetObjectResponse;
import com.amazon.s3.GetObjectResult;
import com.amazon.s3.Grant;
import com.amazon.s3.Grantee;
import com.amazon.s3.ListAllMyBuckets;
import com.amazon.s3.ListAllMyBucketsEntry;
import com.amazon.s3.ListAllMyBucketsList;
import com.amazon.s3.ListAllMyBucketsResponse;
import com.amazon.s3.ListAllMyBucketsResult;
import com.amazon.s3.ListBucket;
import com.amazon.s3.ListBucketResponse;
import com.amazon.s3.ListBucketResult;
import com.amazon.s3.ListEntry;
import com.amazon.s3.MetadataDirective;
import com.amazon.s3.MetadataEntry;
import com.amazon.s3.Permission;
import com.amazon.s3.PrefixEntry;
import com.amazon.s3.PutObject;
import com.amazon.s3.PutObjectInline;
import com.amazon.s3.PutObjectInlineResponse;
import com.amazon.s3.PutObjectResponse;
import com.amazon.s3.PutObjectResult;
import com.amazon.s3.SetBucketAccessControlPolicy;
import com.amazon.s3.SetBucketAccessControlPolicyResponse;
import com.amazon.s3.SetBucketLoggingStatus;
import com.amazon.s3.SetBucketLoggingStatusResponse;
import com.amazon.s3.SetObjectAccessControlPolicy;
import com.amazon.s3.SetObjectAccessControlPolicyResponse;
import com.amazon.s3.Status;
import com.amazon.s3.StorageClass;
import com.cloud.bridge.model.SAcl;
import com.cloud.bridge.service.core.s3.S3AccessControlList;
import com.cloud.bridge.service.core.s3.S3AccessControlPolicy;
import com.cloud.bridge.service.core.s3.S3CanonicalUser;
import com.cloud.bridge.service.core.s3.S3ConditionalHeaders;
import com.cloud.bridge.service.core.s3.S3CopyObjectRequest;
import com.cloud.bridge.service.core.s3.S3CopyObjectResponse;
import com.cloud.bridge.service.core.s3.S3CreateBucketRequest;
import com.cloud.bridge.service.core.s3.S3CreateBucketResponse;
import com.cloud.bridge.service.core.s3.S3DeleteBucketRequest;
import com.cloud.bridge.service.core.s3.S3DeleteObjectRequest;
import com.cloud.bridge.service.core.s3.S3Engine;
import com.cloud.bridge.service.core.s3.S3GetBucketAccessControlPolicyRequest;
import com.cloud.bridge.service.core.s3.S3GetObjectAccessControlPolicyRequest;
import com.cloud.bridge.service.core.s3.S3GetObjectRequest;
import com.cloud.bridge.service.core.s3.S3GetObjectResponse;
import com.cloud.bridge.service.core.s3.S3Grant;
import com.cloud.bridge.service.core.s3.S3ListAllMyBucketsEntry;
import com.cloud.bridge.service.core.s3.S3ListAllMyBucketsRequest;
import com.cloud.bridge.service.core.s3.S3ListAllMyBucketsResponse;
import com.cloud.bridge.service.core.s3.S3ListBucketObjectEntry;
import com.cloud.bridge.service.core.s3.S3ListBucketPrefixEntry;
import com.cloud.bridge.service.core.s3.S3ListBucketRequest;
import com.cloud.bridge.service.core.s3.S3ListBucketResponse;
import com.cloud.bridge.service.core.s3.S3MetaDataEntry;
import com.cloud.bridge.service.core.s3.S3PutObjectInlineRequest;
import com.cloud.bridge.service.core.s3.S3PutObjectInlineResponse;
import com.cloud.bridge.service.core.s3.S3Response;
import com.cloud.bridge.service.core.s3.S3SetBucketAccessControlPolicyRequest;
import com.cloud.bridge.service.core.s3.S3SetObjectAccessControlPolicyRequest;
import com.cloud.bridge.service.exception.InternalErrorException;
/**
 * SOAP-level implementation of the Amazon S3 service interface.
 * Each operation translates the Axis2-generated wire objects into engine-level
 * request objects, hands them to the shared S3Engine, and converts the engine's
 * response back into wire objects.
 */
public class S3SoapServiceImpl implements AmazonS3SkeletonInterface {
protected final static Logger logger = Logger.getLogger(S3SoapServiceImpl.class);
// Engine that performs the actual bucket/object operations; injected at construction.
private S3Engine engine;
public S3SoapServiceImpl(S3Engine engine) {
this.engine = engine;
}
/** S3 server-access-logging retrieval is not implemented by this bridge. */
public GetBucketLoggingStatusResponse getBucketLoggingStatus(
GetBucketLoggingStatus getBucketLoggingStatus) {
throw new UnsupportedOperationException("Unsupported API");
}
/** S3 server-access-logging configuration is not implemented by this bridge. */
public SetBucketLoggingStatusResponse setBucketLoggingStatus(SetBucketLoggingStatus setBucketLoggingStatus) {
throw new UnsupportedOperationException("Unsupported API");
}
/**
 * SOAP CopyObject: copies an object between (bucket, key) locations.
 * Maps the wire request, including the optional metadata directive,
 * metadata entries, ACL and the copy-source conditional headers, onto an
 * engine request and converts the engine result back to a SOAP response.
 *
 * @throws AxisFault raised by toCopyObjectResponse when the engine reports
 *         a result code of 300 or above
 */
public CopyObjectResponse copyObject(CopyObject copyObject) throws AxisFault {
S3CopyObjectRequest request = new S3CopyObjectRequest();
request.setSourceBucketName(copyObject.getSourceBucket());
request.setSourceKey(copyObject.getSourceKey());
request.setDestinationBucketName(copyObject.getDestinationBucket());
request.setDestinationKey(copyObject.getDestinationKey());
// Metadata directive (COPY vs REPLACE) is optional on the wire.
MetadataDirective mdd = copyObject.getMetadataDirective();
if (null != mdd) request.setDataDirective(mdd.getValue());
request.setMetaEntries(toEngineMetaEntries(copyObject.getMetadata()));
request.setAcl(toEngineAccessControlList(copyObject.getAccessControlList()));
// Copy-source conditional headers (If-Modified-Since / If-Match family).
S3ConditionalHeaders conds = new S3ConditionalHeaders();
conds.setModifiedSince(copyObject.getCopySourceIfModifiedSince());
conds.setUnModifiedSince(copyObject.getCopySourceIfUnmodifiedSince());
conds.setMatch(copyObject.getCopySourceIfMatch());
conds.setNoneMatch(copyObject.getCopySourceIfNoneMatch());
request.setConditions(conds);
return toCopyObjectResponse(engine.handleRequest(request));
}
/** SOAP GetBucketAccessControlPolicy: returns the ACL of a bucket. */
public GetBucketAccessControlPolicyResponse getBucketAccessControlPolicy(
GetBucketAccessControlPolicy getBucketAccessControlPolicy) {
// after authentication, we should setup user context
return toGetBucketAccessControlPolicyResponse(engine.handleRequest(
toEngineGetBucketAccessControlPolicyRequest(getBucketAccessControlPolicy)));
}
/** Maps the wire request (credentials, timestamp, signature, bucket) onto an engine request. */
private S3GetBucketAccessControlPolicyRequest toEngineGetBucketAccessControlPolicyRequest(
GetBucketAccessControlPolicy getBucketAccessControlPolicy) {
S3GetBucketAccessControlPolicyRequest request = new S3GetBucketAccessControlPolicyRequest();
request.setAccessKey(getBucketAccessControlPolicy.getAWSAccessKeyId());
request.setRequestTimestamp(getBucketAccessControlPolicy.getTimestamp());
request.setSignature(getBucketAccessControlPolicy.getSignature());
request.setBucketName(getBucketAccessControlPolicy.getBucket());
return request;
}
/** Wraps an engine-level access control policy in the SOAP response envelope. */
public static GetBucketAccessControlPolicyResponse toGetBucketAccessControlPolicyResponse(S3AccessControlPolicy policy) {
GetBucketAccessControlPolicyResponse response = new GetBucketAccessControlPolicyResponse();
response.setGetBucketAccessControlPolicyResponse(toAccessControlPolicy(policy));
return response;
}
/**
 * SOAP SetBucketAccessControlPolicy: replaces the ACL on a bucket.
 * The generated response type carries no payload, so the engine's result is
 * intentionally discarded; engine-level failures surface as exceptions from
 * handleRequest. (The previous code stored the result in an unused local.)
 */
public SetBucketAccessControlPolicyResponse setBucketAccessControlPolicy(SetBucketAccessControlPolicy setBucketAccessControlPolicy) {
S3SetBucketAccessControlPolicyRequest request = new S3SetBucketAccessControlPolicyRequest();
request.setAccessKey(setBucketAccessControlPolicy.getAWSAccessKeyId());
request.setRequestTimestamp(setBucketAccessControlPolicy.getTimestamp());
request.setSignature(setBucketAccessControlPolicy.getSignature());
request.setBucketName(setBucketAccessControlPolicy.getBucket());
request.setAcl(toEngineAccessControlList(setBucketAccessControlPolicy.getAccessControlList()));
// Invoked for its side effect only; matches setObjectAccessControlPolicy below.
engine.handleRequest(request);
return new SetBucketAccessControlPolicyResponse();
}
/** SOAP ListBucket: lists a bucket's contents (non-versioned listing). */
public ListBucketResponse listBucket (ListBucket listBucket) {
// after authentication, we should setup user context
return toListBucketResponse(engine.listBucketContents(toEngineListBucketRequest(listBucket), false));
}
/** Maps the wire listing request (credentials plus delimiter/marker/max-keys/prefix) onto an engine request. */
private S3ListBucketRequest toEngineListBucketRequest(ListBucket listBucket) {
S3ListBucketRequest request = new S3ListBucketRequest();
request.setAccessKey(listBucket.getAWSAccessKeyId());
request.setRequestTimestamp(listBucket.getTimestamp());
request.setSignature(listBucket.getSignature());
request.setBucketName(listBucket.getBucket());
request.setDelimiter(listBucket.getDelimiter());
request.setMarker(listBucket.getMarker());
request.setMaxKeys(listBucket.getMaxKeys());
request.setPrefix(listBucket.getPrefix());
return request;
}
/** Converts an engine listing result (entries, common prefixes, truncation state) into the SOAP response. */
public static ListBucketResponse toListBucketResponse(S3ListBucketResponse engineResponse) {
ListBucketResponse response = new ListBucketResponse();
ListBucketResult result = new ListBucketResult();
result.setName(engineResponse.getBucketName());
result.setDelimiter(engineResponse.getDelimiter());
result.setPrefix(engineResponse.getPrefix());
result.setMarker(engineResponse.getMarker());
result.setMaxKeys(engineResponse.getMaxKeys());
result.setIsTruncated(engineResponse.isTruncated());
result.setNextMarker(engineResponse.getNextMarker());
result.setCommonPrefixes(toPrefixEntry(engineResponse.getCommonPrefixes()));
result.setContents(toListEntry(engineResponse.getContents()));
response.setListBucketResponse(result);
return response;
}
/** Converts engine common-prefix entries to wire-level PrefixEntry objects; null in, null out. */
private static PrefixEntry[] toPrefixEntry(S3ListBucketPrefixEntry[] engineEntries) {
    if (engineEntries == null) {
        return null;
    }
    PrefixEntry[] result = new PrefixEntry[engineEntries.length];
    int idx = 0;
    for (S3ListBucketPrefixEntry engineEntry : engineEntries) {
        PrefixEntry entry = new PrefixEntry();
        entry.setPrefix(engineEntry.getPrefix());
        result[idx++] = entry;
    }
    return result;
}
/**
 * Converts engine object-listing entries to wire-level ListEntry objects;
 * returns null when the input is null. Storage class is always reported as
 * STANDARD since the bridge does not track storage classes.
 */
private static ListEntry[] toListEntry(S3ListBucketObjectEntry[] engineEntries) {
if(engineEntries != null) {
ListEntry[] entries = new ListEntry[engineEntries.length];
for(int i = 0; i < engineEntries.length; i++) {
entries[i] = new ListEntry();
entries[i].setETag(engineEntries[i].getETag());
entries[i].setKey(engineEntries[i].getKey());
entries[i].setLastModified(engineEntries[i].getLastModified());
entries[i].setSize(engineEntries[i].getSize());
entries[i].setStorageClass(StorageClass.STANDARD);
CanonicalUser owner = new CanonicalUser();
owner.setID(engineEntries[i].getOwnerCanonicalId());
owner.setDisplayName(engineEntries[i].getOwnerDisplayName());
entries[i].setOwner(owner);
}
return entries;
}
return null;
}
/**
 * SOAP PutObject is not implemented; object upload via SOAP is only
 * supported through putObjectInline in this bridge.
 */
public PutObjectResponse putObject(PutObject putObject) {
//TODO : fill this with the necessary business logic
throw new UnsupportedOperationException("Please implement " + this.getClass().getName() + "#putObject");
}
/** SOAP CreateBucket: creates a bucket with an optional initial ACL. */
public CreateBucketResponse createBucket (CreateBucket createBucket) {
return toCreateBucketResponse(engine.handleRequest(toEngineCreateBucketRequest(createBucket)));
}
/** Maps the wire request (credentials, bucket name, optional ACL) onto an engine request. */
private S3CreateBucketRequest toEngineCreateBucketRequest(CreateBucket createBucket) {
S3CreateBucketRequest request = new S3CreateBucketRequest();
request.setAccessKey(createBucket.getAWSAccessKeyId());
request.setRequestTimestamp(createBucket.getTimestamp());
request.setSignature(createBucket.getSignature());
request.setBucketName(createBucket.getBucket());
request.setAcl(toEngineAccessControlList(createBucket.getAccessControlList()));
return request;
}
/** Wraps the created bucket's name in the SOAP response envelope. */
private CreateBucketResponse toCreateBucketResponse(S3CreateBucketResponse engineResponse) {
CreateBucketResponse response = new CreateBucketResponse();
CreateBucketResult result = new CreateBucketResult();
result.setBucketName(engineResponse.getBucketName());
response.setCreateBucketReturn(result);
return response;
}
/** SOAP ListAllMyBuckets: lists the calling user's buckets. */
public ListAllMyBucketsResponse listAllMyBuckets (ListAllMyBuckets listAllMyBuckets) {
return toListAllMyBucketsResponse(engine.handleRequest(toEngineListAllMyBucketsRequest(listAllMyBuckets)));
}
/** Maps the wire request (credentials only — no bucket parameter) onto an engine request. */
private S3ListAllMyBucketsRequest toEngineListAllMyBucketsRequest(ListAllMyBuckets listAllMyBuckets) {
S3ListAllMyBucketsRequest request = new S3ListAllMyBucketsRequest();
request.setAccessKey(listAllMyBuckets.getAWSAccessKeyId());
request.setRequestTimestamp(listAllMyBuckets.getTimestamp());
request.setSignature(listAllMyBuckets.getSignature());
return request;
}
/**
 * Converts the engine bucket list into the SOAP response: owner identity plus
 * one entry (name, creation date) per bucket. When the engine reports no
 * buckets, the response carries only the owner and no bucket list element.
 */
public static ListAllMyBucketsResponse toListAllMyBucketsResponse(S3ListAllMyBucketsResponse engineResponse) {
ListAllMyBucketsResponse response = new ListAllMyBucketsResponse();
ListAllMyBucketsResult result = new ListAllMyBucketsResult();
ListAllMyBucketsEntry[] entries = null;
S3CanonicalUser ownerEngine = engineResponse.getOwner();
CanonicalUser owner = new CanonicalUser();
owner.setID(ownerEngine.getID());
owner.setDisplayName(ownerEngine.getDisplayName());
result.setOwner(owner);
S3ListAllMyBucketsEntry[] engineEntries = engineResponse.getBuckets();
if (engineEntries != null) {
entries = new ListAllMyBucketsEntry[engineEntries.length];
for(int i = 0; i < engineEntries.length; i++) {
entries[i] = new ListAllMyBucketsEntry();
entries[i].setName(engineEntries[i].getName());
entries[i].setCreationDate(engineEntries[i].getCreationDate());
}
ListAllMyBucketsList list = new ListAllMyBucketsList();
list.setBucket(entries);
result.setBuckets(list);
}
response.setListAllMyBucketsResponse(result);
return response;
}
/** SOAP DeleteBucket: deletes an (empty) bucket. */
public DeleteBucketResponse deleteBucket(DeleteBucket deleteBucket) {
return toDeleteBucketResponse(engine.handleRequest(toEngineDeleteBucketRequest(deleteBucket)));
}
/** Maps the wire request (credentials, bucket name) onto an engine request. */
private S3DeleteBucketRequest toEngineDeleteBucketRequest(DeleteBucket deleteBucket) {
S3DeleteBucketRequest request = new S3DeleteBucketRequest();
request.setAccessKey(deleteBucket.getAWSAccessKeyId());
request.setRequestTimestamp(deleteBucket.getTimestamp());
request.setSignature(deleteBucket.getSignature());
request.setBucketName(deleteBucket.getBucket());
return request;
}
/** Converts the engine result code/description into the SOAP status response. */
private DeleteBucketResponse toDeleteBucketResponse(S3Response engineResponse) {
DeleteBucketResponse response = new DeleteBucketResponse();
Status status = new Status();
status.setCode(engineResponse.getResultCode());
status.setDescription(engineResponse.getResultDescription());
response.setDeleteBucketResponse(status);
return response;
}
/** SOAP GetObject: fetches an object (data and/or metadata, optionally inline). */
public GetObjectResponse getObject(com.amazon.s3.GetObject getObject) {
return toGetObjectResponse(engine.handleRequest(toEngineGetObjectRequest(getObject)));
}
/** SOAP GetObjectExtended: GetObject plus conditional headers and byte-range support. */
public GetObjectExtendedResponse getObjectExtended(GetObjectExtended getObjectExtended) {
return toGetObjectExtendedResponse(engine.handleRequest(toEngineGetObjectRequest(getObjectExtended)));
}
/** Maps the basic wire request (credentials, bucket, key, data/metadata/inline flags) onto an engine request. */
private S3GetObjectRequest toEngineGetObjectRequest(GetObject getObject)
{
S3GetObjectRequest request = new S3GetObjectRequest();
request.setAccessKey(getObject.getAWSAccessKeyId());
request.setRequestTimestamp(getObject.getTimestamp());
request.setSignature(getObject.getSignature());
request.setBucketName(getObject.getBucket());
request.setKey(getObject.getKey());
request.setReturnData(getObject.getGetData());
request.setReturnMetadata(getObject.getGetMetadata());
request.setInlineData(getObject.getInlineData());
return request;
}
/**
 * Maps the extended wire request onto an engine request: everything the basic
 * request carries plus conditional headers (If-Modified-Since / If-Match
 * family), byte range, and the return-complete-object-on-condition-failure flag.
 */
private S3GetObjectRequest toEngineGetObjectRequest(GetObjectExtended getObjectExtended) {
S3GetObjectRequest request = new S3GetObjectRequest();
request.setAccessKey(getObjectExtended.getAWSAccessKeyId());
request.setRequestTimestamp(getObjectExtended.getTimestamp());
request.setSignature(getObjectExtended.getSignature());
request.setBucketName(getObjectExtended.getBucket());
request.setKey(getObjectExtended.getKey());
request.setReturnData(getObjectExtended.getGetData());
request.setReturnMetadata(getObjectExtended.getGetMetadata());
request.setInlineData(getObjectExtended.getInlineData());
S3ConditionalHeaders conds = new S3ConditionalHeaders();
conds.setModifiedSince(getObjectExtended.getIfModifiedSince());
conds.setUnModifiedSince(getObjectExtended.getIfUnmodifiedSince());
conds.setMatch(getObjectExtended.getIfMatch());
conds.setNoneMatch(getObjectExtended.getIfNoneMatch());
request.setConditions(conds);
request.setByteRangeStart(getObjectExtended.getByteRangeStart());
request.setByteRangeEnd(getObjectExtended.getByteRangeEnd());
request.setReturnCompleteObjectOnConditionFailure(getObjectExtended.getReturnCompleteObjectOnConditionFailure());
return request;
}
/**
 * Converts an engine get-object result into the SOAP response.
 * On a 200 result the payload (data, ETag, metadata, last-modified) is copied;
 * on any other code only the status is meaningful and the ETag/last-modified
 * fields are filled with placeholder values to satisfy the response schema.
 */
private GetObjectResponse toGetObjectResponse(S3GetObjectResponse engineResponse) {
GetObjectResponse response = new GetObjectResponse();
int resultCode = engineResponse.getResultCode();
GetObjectResult result = new GetObjectResult();
Status param1 = new Status();
param1.setCode( resultCode);
param1.setDescription( engineResponse.getResultDescription());
result.setStatus( param1 );
if ( 200 == resultCode )
{
result.setData(engineResponse.getData());
result.setETag( engineResponse.getETag());
result.setMetadata(toMetadataEntry(engineResponse.getMetaEntries()));
result.setLastModified( engineResponse.getLastModified());
}
else
{ result.setETag( "" );
result.setLastModified( Calendar.getInstance());
}
response.setGetObjectResponse(result);
return response;
}
/**
 * Converts an engine get-object result into the extended SOAP response.
 * Unlike toGetObjectResponse this also treats 206 (partial content, from a
 * byte-range request) as success; otherwise placeholder values are used.
 */
private GetObjectExtendedResponse toGetObjectExtendedResponse(S3GetObjectResponse engineResponse) {
GetObjectExtendedResponse response = new GetObjectExtendedResponse();
int resultCode = engineResponse.getResultCode();
GetObjectResult result = new GetObjectResult();
Status param1 = new Status();
param1.setCode( resultCode );
param1.setDescription( engineResponse.getResultDescription());
result.setStatus( param1 );
if ( 200 == resultCode || 206 == resultCode )
{
result.setData(engineResponse.getData());
result.setETag( engineResponse.getETag());
result.setMetadata(toMetadataEntry(engineResponse.getMetaEntries()));
result.setLastModified( engineResponse.getLastModified());
}
else
{ result.setETag( "" );
result.setLastModified( Calendar.getInstance());
}
response.setGetObjectResponse(result);
return response;
}
/** Converts engine metadata entries to wire-level MetadataEntry objects; null in, null out. */
private MetadataEntry[] toMetadataEntry(S3MetaDataEntry[] engineEntries) {
    if (engineEntries == null) {
        return null;
    }
    MetadataEntry[] result = new MetadataEntry[engineEntries.length];
    int idx = 0;
    for (S3MetaDataEntry engineEntry : engineEntries) {
        MetadataEntry entry = new MetadataEntry();
        entry.setName(engineEntry.getName());
        entry.setValue(engineEntry.getValue());
        result[idx++] = entry;
    }
    return result;
}
/** SOAP GetObjectAccessControlPolicy: returns the ACL of a single object. */
public GetObjectAccessControlPolicyResponse getObjectAccessControlPolicy(
GetObjectAccessControlPolicy getObjectAccessControlPolicy) {
return toGetObjectAccessControlPolicyResponse(engine.handleRequest(
toEngineGetObjectAccessControlPolicyRequest(getObjectAccessControlPolicy)));
}
/** Maps the wire request (credentials, bucket, key) onto an engine request. */
private S3GetObjectAccessControlPolicyRequest toEngineGetObjectAccessControlPolicyRequest(
GetObjectAccessControlPolicy getObjectAccessControlPolicy) {
S3GetObjectAccessControlPolicyRequest request = new S3GetObjectAccessControlPolicyRequest();
request.setAccessKey(getObjectAccessControlPolicy.getAWSAccessKeyId());
request.setRequestTimestamp(getObjectAccessControlPolicy.getTimestamp());
request.setSignature(getObjectAccessControlPolicy.getSignature());
request.setBucketName(getObjectAccessControlPolicy.getBucket());
request.setKey(getObjectAccessControlPolicy.getKey());
return request;
}
/** Wraps an engine-level access control policy in the SOAP response envelope. */
public static GetObjectAccessControlPolicyResponse toGetObjectAccessControlPolicyResponse(S3AccessControlPolicy policy) {
GetObjectAccessControlPolicyResponse response = new GetObjectAccessControlPolicyResponse();
response.setGetObjectAccessControlPolicyResponse(toAccessControlPolicy(policy));
return response;
}
/** Converts an engine ACL policy (owner + grants) into the wire-level AccessControlPolicy. */
private static AccessControlPolicy toAccessControlPolicy(S3AccessControlPolicy enginePolicy) {
AccessControlPolicy policy = new AccessControlPolicy();
CanonicalUser owner = new CanonicalUser();
owner.setID(enginePolicy.getOwner().getID());
owner.setDisplayName(enginePolicy.getOwner().getDisplayName());
policy.setOwner(owner);
AccessControlList acl = new AccessControlList();
acl.setGrant(toGrants(enginePolicy.getGrants()));
policy.setAccessControlList(acl);
return policy;
}
/** SOAP DeleteObject: deletes a single object from a bucket. */
public DeleteObjectResponse deleteObject (DeleteObject deleteObject) {
return toDeleteObjectResponse(engine.handleRequest(toEngineDeleteObjectRequest(deleteObject)));
}
/** Maps the wire request (credentials, bucket, key) onto an engine request. */
private S3DeleteObjectRequest toEngineDeleteObjectRequest(DeleteObject deleteObject) {
S3DeleteObjectRequest request = new S3DeleteObjectRequest();
request.setAccessKey(deleteObject.getAWSAccessKeyId());
request.setRequestTimestamp(deleteObject.getTimestamp());
request.setSignature(deleteObject.getSignature());
request.setBucketName(deleteObject.getBucket());
request.setKey(deleteObject.getKey());
return request;
}
/** Converts the engine result code/description into the SOAP status response. */
private DeleteObjectResponse toDeleteObjectResponse(S3Response engineResponse) {
DeleteObjectResponse response = new DeleteObjectResponse();
Status status = new Status();
status.setCode(engineResponse.getResultCode());
status.setDescription(engineResponse.getResultDescription());
response.setDeleteObjectResponse(status);
return response;
}
/**
 * SOAP SetObjectAccessControlPolicy: replaces the ACL on an object.
 * The engine result is discarded because the generated response type carries
 * no payload; failures surface as exceptions from handleRequest.
 */
public SetObjectAccessControlPolicyResponse setObjectAccessControlPolicy(SetObjectAccessControlPolicy setObjectAccessControlPolicy)
{
S3SetObjectAccessControlPolicyRequest request = new S3SetObjectAccessControlPolicyRequest();
request.setAccessKey(setObjectAccessControlPolicy.getAWSAccessKeyId());
request.setRequestTimestamp(setObjectAccessControlPolicy.getTimestamp());
request.setSignature(setObjectAccessControlPolicy.getSignature());
request.setBucketName(setObjectAccessControlPolicy.getBucket());
request.setKey(setObjectAccessControlPolicy.getKey());
request.setAcl(toEngineAccessControlList(setObjectAccessControlPolicy.getAccessControlList()));
engine.handleRequest(request);
SetObjectAccessControlPolicyResponse response = new SetObjectAccessControlPolicyResponse();
return response;
}
/** SOAP PutObjectInline: uploads an object whose data is carried inline in the SOAP body. */
public PutObjectInlineResponse putObjectInline (PutObjectInline putObjectInline) {
return toPutObjectInlineResponse(engine.handleRequest(toEnginePutObjectInlineRequest(putObjectInline)));
}
/** Maps the wire request (credentials, bucket, key, length, inline data, metadata, ACL) onto an engine request. */
private S3PutObjectInlineRequest toEnginePutObjectInlineRequest(PutObjectInline putObjectInline) {
S3PutObjectInlineRequest request = new S3PutObjectInlineRequest();
request.setAccessKey(putObjectInline.getAWSAccessKeyId());
request.setRequestTimestamp(putObjectInline.getTimestamp());
request.setSignature(putObjectInline.getSignature());
request.setBucketName(putObjectInline.getBucket());
request.setContentLength(putObjectInline.getContentLength());
request.setKey(putObjectInline.getKey());
request.setData(putObjectInline.getData());
request.setMetaEntries(toEngineMetaEntries(putObjectInline.getMetadata()));
request.setAcl(toEngineAccessControlList(putObjectInline.getAccessControlList()));
return request;
}
/** Converts wire-level metadata entries to engine entries; null in, null out. */
private S3MetaDataEntry[] toEngineMetaEntries(MetadataEntry[] metaEntries) {
if(metaEntries != null) {
S3MetaDataEntry[] engineMetaEntries = new S3MetaDataEntry[metaEntries.length];
for(int i = 0; i < metaEntries.length; i++) {
engineMetaEntries[i] = new S3MetaDataEntry();
engineMetaEntries[i].setName(metaEntries[i].getName());
engineMetaEntries[i].setValue(metaEntries[i].getValue());
}
return engineMetaEntries;
}
return null;
}
/**
 * Converts a wire-level ACL into the engine representation.
 * Canonical-user grantees map to GRANTEE_USER with their canonical id; group
 * grantees map to the all-users ("*") or authenticated-users ("A") pseudo ids.
 * Permission names (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL) map to the
 * corresponding SAcl constants.
 *
 * @param acl wire-level ACL, may be null (returns null)
 * @return engine ACL, or null when no ACL was supplied
 * @throws UnsupportedOperationException on an unknown grantee type, group URI,
 *         or permission name
 */
private S3AccessControlList toEngineAccessControlList(AccessControlList acl)
{
if (acl == null) return null;
S3AccessControlList engineAcl = new S3AccessControlList();
Grant[] grants = acl.getGrant();
if (grants != null)
{
for (Grant grant: grants)
{
S3Grant engineGrant = new S3Grant();
Grantee grantee = grant.getGrantee();
if (grantee instanceof CanonicalUser)
{
engineGrant.setGrantee(SAcl.GRANTEE_USER);
engineGrant.setCanonicalUserID(((CanonicalUser)grantee).getID());
}
else if (grantee instanceof Group)
{
Group temp = (Group)grantee;
String uri = temp.getURI();
if ( uri.equalsIgnoreCase( "http://acs.amazonaws.com/groups/global/AllUsers" )) {
// -> this allows all public unauthenticated access based on permission given
engineGrant.setGrantee(SAcl.GRANTEE_ALLUSERS);
engineGrant.setCanonicalUserID( "*" );
}
// Accept both the URI this bridge has historically emitted ("Authenticated")
// and the official AWS group URI ("AuthenticatedUsers"), so ACLs produced by
// standard S3 clients are not rejected.
else if (uri.equalsIgnoreCase( "http://acs.amazonaws.com/groups/global/Authenticated" ) ||
         uri.equalsIgnoreCase( "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" )) {
// -> this allows any authenticated user access based on permission given
engineGrant.setGrantee(SAcl.GRANTEE_AUTHENTICATED);
engineGrant.setCanonicalUserID( "A" );
}
else throw new UnsupportedOperationException("Unsupported grantee group URI: " + uri );
}
else throw new UnsupportedOperationException("Unsupported grantee type: " + grantee.getClass().getCanonicalName());
Permission permission = grant.getPermission();
String permissionValue = permission.getValue();
if(permissionValue.equalsIgnoreCase("READ")) {
engineGrant.setPermission(SAcl.PERMISSION_READ);
} else if(permissionValue.equalsIgnoreCase("WRITE")) {
engineGrant.setPermission(SAcl.PERMISSION_WRITE);
} else if(permissionValue.equalsIgnoreCase("READ_ACP")) {
engineGrant.setPermission(SAcl.PERMISSION_READ_ACL);
} else if(permissionValue.equalsIgnoreCase("WRITE_ACP")) {
engineGrant.setPermission(SAcl.PERMISSION_WRITE_ACL);
} else if(permissionValue.equalsIgnoreCase("FULL_CONTROL")) {
engineGrant.setPermission(SAcl.PERMISSION_FULL);
} else {
throw new UnsupportedOperationException("Unsupported permission: " + permissionValue);
}
engineAcl.addGrant(engineGrant);
}
}
return engineAcl;
}
/**
 * Converts engine-level grants back into wire-level Grant objects, the inverse
 * of toEngineAccessControlList. Returns null for a null or empty input array.
 *
 * @throws InternalErrorException on an unknown grantee or permission constant
 */
private static Grant[] toGrants(S3Grant[] engineGrants) {
Grantee grantee = null;
Grant[] grants = null;
if (engineGrants != null && 0 < engineGrants.length)
{
grants = new Grant[engineGrants.length];
for(int i = 0; i < engineGrants.length; i++)
{
grants[i] = new Grant();
switch( engineGrants[i].getGrantee()) {
case SAcl.GRANTEE_USER :
grantee = new CanonicalUser();
((CanonicalUser)grantee).setID(engineGrants[i].getCanonicalUserID());
// NOTE(review): display name is not stored by the engine — placeholder.
((CanonicalUser)grantee).setDisplayName("TODO");
grants[i].setGrantee(grantee);
break;
case SAcl.GRANTEE_ALLUSERS:
grantee = new Group();
((Group)grantee).setURI( "http://acs.amazonaws.com/groups/global/AllUsers" );
grants[i].setGrantee(grantee);
break;
case SAcl.GRANTEE_AUTHENTICATED:
grantee = new Group();
((Group)grantee).setURI( "http://acs.amazonaws.com/groups/global/Authenticated" );
grants[i].setGrantee(grantee);
break;
default :
throw new InternalErrorException("Unsupported grantee type");
}
switch( engineGrants[i].getPermission()) {
case SAcl.PERMISSION_READ: grants[i].setPermission(Permission.READ); break;
case SAcl.PERMISSION_WRITE: grants[i].setPermission(Permission.WRITE); break;
case SAcl.PERMISSION_READ_ACL: grants[i].setPermission(Permission.READ_ACP); break;
case SAcl.PERMISSION_WRITE_ACL: grants[i].setPermission(Permission.WRITE_ACP); break;
case SAcl.PERMISSION_FULL: grants[i].setPermission(Permission.FULL_CONTROL); break;
default:
// Previously an unknown permission fell through silently, leaving the
// grant's permission unset; fail loudly instead, mirroring the grantee switch.
throw new InternalErrorException("Unsupported permission type: " + engineGrants[i].getPermission());
}
}
return grants;
}
return null;
}
/** Wraps the engine put-object result (ETag, last-modified) in the SOAP response envelope. */
private PutObjectInlineResponse toPutObjectInlineResponse(S3PutObjectInlineResponse engineResponse) {
PutObjectInlineResponse response = new PutObjectInlineResponse();
PutObjectResult result = new PutObjectResult();
result.setETag(engineResponse.getETag());
result.setLastModified(engineResponse.getLastModified());
response.setPutObjectInlineResponse(result);
return response;
}
/**
 * Converts the engine copy result into the SOAP response. Result codes of 300
 * or above are turned into an AxisFault whose code is the numeric result code;
 * on success the ETag is wrapped in quotes as the S3 wire format requires.
 *
 * @throws AxisFault when the engine reports a result code of 300 or above
 */
public static CopyObjectResponse toCopyObjectResponse(S3CopyObjectResponse engineResponse) throws AxisFault {
CopyObjectResponse response = new CopyObjectResponse();
int resultCode = engineResponse.getResultCode();
CopyObjectResult result = new CopyObjectResult();
if ( 300 <= resultCode )
{
String description = engineResponse.getResultDescription();
throw new AxisFault( "" + resultCode, (null == description ? "" : description));
}
result.setETag( "\"" + engineResponse.getETag() + "\"" );
result.setLastModified(engineResponse.getLastModified());
response.setCopyObjectResult(result);
return response;
}
}

View File

@ -1,358 +0,0 @@
/**
* Copyright (C) 2011 Citrix Systems, Inc. All rights reserved
*
* This software is licensed under the GNU General Public License v3 or later.
*
* It is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package com.cloud.bridge.service;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.net.InetAddress;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.Timer;
import java.util.TimerTask;
import org.apache.axis2.AxisFault;
import org.apache.log4j.Logger;
import org.apache.log4j.xml.DOMConfigurator;
import org.hibernate.SessionException;
import com.amazon.s3.AmazonS3SkeletonInterface;
import com.amazon.ec2.AmazonEC2SkeletonInterface;
import com.cloud.bridge.model.MHost;
import com.cloud.bridge.model.SHost;
import com.cloud.bridge.model.UserCredentials;
import com.cloud.bridge.persist.PersistContext;
import com.cloud.bridge.persist.PersistException;
import com.cloud.bridge.persist.dao.MHostDao;
import com.cloud.bridge.persist.dao.SHostDao;
import com.cloud.bridge.persist.dao.UserCredentialsDao;
import com.cloud.bridge.service.core.ec2.EC2Engine;
import com.cloud.bridge.service.core.s3.S3BucketPolicy;
import com.cloud.bridge.service.core.s3.S3Engine;
import com.cloud.bridge.service.exception.ConfigurationException;
import com.cloud.bridge.util.ConfigurationHelper;
import com.cloud.bridge.util.DateHelper;
import com.cloud.bridge.util.NetHelper;
import com.cloud.bridge.util.Tuple;
/**
* @author Kelven Yang
*/
public class ServiceProvider {
protected final static Logger logger = Logger.getLogger(ServiceProvider.class);
public final static long HEARTBEAT_INTERVAL = 10000;
private static ServiceProvider instance;
private Map<Class<?>, Object> serviceMap = new HashMap<Class<?>, Object>();
private Timer timer = new Timer();
private MHost mhost;
private Properties properties;
private boolean useSubDomain = false; // use DNS sub domain for bucket name
private String serviceEndpoint = null;
private String multipartDir = null; // illegal bucket name used as a folder for storing multiparts
private String masterDomain = ".s3.amazonaws.com";
private S3Engine engine;
private EC2Engine EC2_engine = null;
// -> cache Bucket Policies here so we don't have to load from db on every access
private Map<String,S3BucketPolicy> policyMap = new HashMap<String,S3BucketPolicy>();
/**
 * Builds the provider's engines and registers the SOAP service implementations
 * keyed by their skeleton interface, for lookup via getServiceImpl.
 */
protected ServiceProvider() throws IOException {
// register service implementation object
engine = new S3Engine();
EC2_engine = new EC2Engine();
serviceMap.put(AmazonS3SkeletonInterface.class, new S3SoapServiceImpl(engine));
serviceMap.put(AmazonEC2SkeletonInterface.class, new EC2SoapServiceImpl(EC2_engine));
}
/**
 * Lazily creates the ServiceProvider singleton (synchronized lazy init).
 * The singleton field is assigned only after construction, initialize() and
 * the transaction commit have all succeeded; previously the field was set
 * before initialize(), so a failed startup permanently published a
 * half-initialized instance. On failure this now returns null and the next
 * call retries initialization.
 */
public synchronized static ServiceProvider getInstance() {
if(instance == null)
{
try {
ServiceProvider provider = new ServiceProvider();
provider.initialize();
PersistContext.commitTransaction();
instance = provider;   // publish only a fully-initialized instance
} catch(Throwable e) {
logger.error("Unexpected exception " + e.getMessage(), e);
} finally {
PersistContext.closeSession();
}
}
return instance;
}
public long getManagementHostId() {
// we want to limit mhost within its own session, id of the value will be returned
long mhostId = 0;
if(mhost != null)
mhostId = mhost.getId() != null ? mhost.getId().longValue() : 0L;
return mhostId;
}
/**
* We return a tuple to distinguish between two cases:
* (1) there is no entry in the map for bucketName, and (2) there is a null entry
* in the map for bucketName. In case 2, the database was inspected for the
* bucket policy but it had none so we remember it here to reduce database lookups.
* @param bucketName
* @return Integer in the tuple means: -1 if no policy defined for the bucket, 0 if one defined
* even if its set at null.
*/
public Tuple<S3BucketPolicy,Integer> getBucketPolicy(String bucketName) {
if (policyMap.containsKey( bucketName )) {
S3BucketPolicy policy = policyMap.get( bucketName );
return new Tuple<S3BucketPolicy,Integer>( policy, 0 );
}
else return new Tuple<S3BucketPolicy,Integer>( null, -1 );
}
/**
* The policy parameter can be set to null, which means that there is no policy
* for the bucket so a database lookup is not necessary.
*
* @param bucketName
* @param policy
*/
public void setBucketPolicy(String bucketName, S3BucketPolicy policy) {
policyMap.put(bucketName, policy);
}
public void deleteBucketPolicy(String bucketName) {
policyMap.remove(bucketName);
}
public S3Engine getS3Engine() {
return engine;
}
public EC2Engine getEC2Engine() {
return EC2_engine;
}
public String getMasterDomain() {
return masterDomain;
}
public boolean getUseSubDomain() {
return useSubDomain;
}
public String getServiceEndpoint() {
return serviceEndpoint;
}
public String getMultipartDir() {
return multipartDir;
}
public Properties getStartupProperties() {
return properties;
}
public UserInfo getUserInfo(String accessKey)
throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException {
UserInfo info = new UserInfo();
UserCredentialsDao credentialDao = new UserCredentialsDao();
UserCredentials cloudKeys = credentialDao.getByAccessKey( accessKey );
if ( null == cloudKeys ) {
logger.debug( accessKey + " is not defined in the S3 service - call SetUserKeys" );
return null;
} else {
info.setAccessKey( accessKey );
info.setSecretKey( cloudKeys.getSecretKey());
info.setCanonicalUserId(accessKey);
info.setDescription( "S3 REST request" );
return info;
}
}
protected void initialize() {
if(logger.isInfoEnabled())
logger.info("Initializing ServiceProvider...");
File file = ConfigurationHelper.findConfigurationFile("log4j-cloud.xml");
if(file != null) {
System.out.println("Log4j configuration from : " + file.getAbsolutePath());
DOMConfigurator.configureAndWatch(file.getAbsolutePath(), 10000);
} else {
System.out.println("Configure log4j with default properties");
}
loadStartupProperties();
String hostKey = properties.getProperty("host.key");
if(hostKey == null) {
InetAddress inetAddr = NetHelper.getFirstNonLoopbackLocalInetAddress();
if(inetAddr != null)
hostKey = NetHelper.getMacAddress(inetAddr);
}
if(hostKey == null)
throw new ConfigurationException("Please configure host.key property in cloud-bridge.properites");
String host = properties.getProperty("host");
if(host == null)
host = NetHelper.getHostName();
if(properties.get("bucket.dns") != null &&
((String)properties.get("bucket.dns")).equalsIgnoreCase("true")) {
useSubDomain = true;
}
serviceEndpoint = (String)properties.get("serviceEndpoint");
masterDomain = new String( "." + serviceEndpoint );
setupHost(hostKey, host);
// we will commit and start a new transaction to allow host info be flushed to DB
PersistContext.flush();
String localStorageRoot = properties.getProperty("storage.root");
if (localStorageRoot != null) setupLocalStorage(localStorageRoot);
multipartDir = properties.getProperty("storage.multipartDir");
timer.schedule(getHeartbeatTask(), HEARTBEAT_INTERVAL, HEARTBEAT_INTERVAL);
if(logger.isInfoEnabled())
logger.info("ServiceProvider initialized");
}
private void loadStartupProperties() {
File propertiesFile = ConfigurationHelper.findConfigurationFile("cloud-bridge.properties");
properties = new Properties();
if(propertiesFile != null) {
try {
properties.load(new FileInputStream(propertiesFile));
} catch (FileNotFoundException e) {
logger.warn("Unable to open properties file: " + propertiesFile.getAbsolutePath(), e);
} catch (IOException e) {
logger.warn("Unable to read properties file: " + propertiesFile.getAbsolutePath(), e);
}
logger.info("Use startup properties file: " + propertiesFile.getAbsolutePath());
} else {
if(logger.isInfoEnabled())
logger.info("Startup properties is not found.");
}
}
private TimerTask getHeartbeatTask() {
return new TimerTask() {
@Override
public void run() {
try {
MHostDao mhostDao = new MHostDao();
mhost.setLastHeartbeatTime(DateHelper.currentGMTTime());
mhostDao.update(mhost);
PersistContext.commitTransaction();
} catch(Throwable e){
logger.error("Unexpected exception " + e.getMessage(), e);
} finally {
PersistContext.closeSession();
}
}
};
}
private void setupHost(String hostKey, String host) {
MHostDao mhostDao = new MHostDao();
mhost = mhostDao.getByHostKey(hostKey);
if(mhost == null) {
mhost = new MHost();
mhost.setHostKey(hostKey);
mhost.setHost(host);
mhost.setLastHeartbeatTime(DateHelper.currentGMTTime());
mhostDao.save(mhost);
} else {
mhost.setHost(host);
mhostDao.update(mhost);
}
}
private void setupLocalStorage(String storageRoot) {
SHostDao shostDao = new SHostDao();
SHost shost = shostDao.getLocalStorageHost(mhost.getId(), storageRoot);
if(shost == null) {
shost = new SHost();
shost.setMhost(mhost);
mhost.getLocalSHosts().add(shost);
shost.setHostType(SHost.STORAGE_HOST_TYPE_LOCAL);
shost.setHost(NetHelper.getHostName());
shost.setExportRoot(storageRoot);
PersistContext.getSession().save(shost);
}
}
public void shutdown() {
timer.cancel();
if(logger.isInfoEnabled())
logger.info("ServiceProvider stopped");
}
@SuppressWarnings("unchecked")
private static <T> T getProxy(Class<?> serviceInterface, final T serviceObject) {
return (T) Proxy.newProxyInstance(serviceObject.getClass().getClassLoader(),
new Class[] { serviceInterface },
new InvocationHandler() {
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
Object result = null;
try {
result = method.invoke(serviceObject, args);
PersistContext.commitTransaction();
PersistContext.commitTransaction(true);
} catch (PersistException e) {
} catch (SessionException e) {
} catch(Throwable e) {
// Rethrow the exception to Axis:
// Check if the exception is an AxisFault or a RuntimeException
// enveloped AxisFault and if so, pass it on as such. Otherwise
// log to help debugging and throw as is.
if (e.getCause() != null && e.getCause() instanceof AxisFault)
throw e.getCause();
else if (e.getCause() != null && e.getCause().getCause() != null
&& e.getCause().getCause() instanceof AxisFault)
throw e.getCause().getCause();
else {
logger.warn("Unhandled exception " + e.getMessage(), e);
throw e;
}
} finally {
PersistContext.closeSession();
PersistContext.closeSession(true);
}
return result;
}
});
}
@SuppressWarnings("unchecked")
public <T> T getServiceImpl(Class<?> serviceInterface) {
return getProxy(serviceInterface, (T)serviceMap.get(serviceInterface));
}
}

View File

@ -1,29 +0,0 @@
/*
* Copyright (C) 2011 Citrix Systems, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloud.bridge.service;
import java.io.IOException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.xml.stream.XMLStreamException;
/**
* @author Kelven Yang
*/
/**
 * Contract for a servlet-dispatched request handler: one implementation per
 * logical action, invoked with the raw request/response pair.
 */
public interface ServletAction {
    /**
     * Processes a single HTTP request, writing the result to the response.
     *
     * @param request  the incoming HTTP request
     * @param response the response to populate
     * @throws IOException        on I/O failure while reading or writing
     * @throws XMLStreamException on failure producing/consuming XML content
     */
    void execute(HttpServletRequest request, HttpServletResponse response) throws IOException, XMLStreamException;
}

View File

@ -53,6 +53,7 @@ import com.cloud.bridge.persist.dao.SAclDao;
import com.cloud.bridge.persist.dao.SBucketDao;
import com.cloud.bridge.service.S3Constants;
import com.cloud.bridge.service.S3RestServlet;
import com.cloud.bridge.service.controller.s3.ServiceProvider;
import com.cloud.bridge.service.UserContext;
import com.cloud.bridge.service.core.s3.S3AccessControlList;
import com.cloud.bridge.service.core.s3.S3AccessControlPolicy;

View File

@ -1,358 +1,358 @@
/**
* Copyright (C) 2011 Citrix Systems, Inc. All rights reserved
*
* This software is licensed under the GNU General Public License v3 or later.
*
* It is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package com.cloud.bridge.service.controller.s3;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.net.InetAddress;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.Timer;
import java.util.TimerTask;
import org.apache.axis2.AxisFault;
import org.apache.log4j.Logger;
import org.apache.log4j.xml.DOMConfigurator;
import org.hibernate.SessionException;
import com.amazon.s3.AmazonS3SkeletonInterface;
import com.amazon.ec2.AmazonEC2SkeletonInterface;
import com.cloud.bridge.model.MHost;
import com.cloud.bridge.model.SHost;
import com.cloud.bridge.model.UserCredentials;
import com.cloud.bridge.persist.PersistContext;
import com.cloud.bridge.persist.PersistException;
import com.cloud.bridge.persist.dao.MHostDao;
import com.cloud.bridge.persist.dao.SHostDao;
import com.cloud.bridge.persist.dao.UserCredentialsDao;
import com.cloud.bridge.service.EC2SoapServiceImpl;
import com.cloud.bridge.service.UserInfo;
import com.cloud.bridge.service.core.ec2.EC2Engine;
import com.cloud.bridge.service.core.s3.S3BucketPolicy;
import com.cloud.bridge.service.core.s3.S3Engine;
import com.cloud.bridge.service.exception.ConfigurationException;
import com.cloud.bridge.util.ConfigurationHelper;
import com.cloud.bridge.util.DateHelper;
import com.cloud.bridge.util.NetHelper;
import com.cloud.bridge.util.OrderedPair;
/**
* @author Kelven Yang
*/
/**
 * Singleton bootstrap object for the cloud bridge: wires up the S3 and EC2
 * engines, registers their SOAP skeleton implementations, loads startup
 * properties, records this management host in the database, and runs a
 * periodic heartbeat. Also caches S3 bucket policies to avoid repeated
 * database lookups.
 *
 * @author Kelven Yang
 */
public class ServiceProvider {
    protected final static Logger logger = Logger.getLogger(ServiceProvider.class);

    /** Interval in milliseconds between heartbeat updates of this host's DB row. */
    public final static long HEARTBEAT_INTERVAL = 10000;

    // Lazily created by getInstance(); guarded by the synchronized accessor.
    private static ServiceProvider instance;

    // SOAP skeleton interface -> implementation; consumers obtain these through
    // getServiceImpl(), which wraps them in a transaction-managing proxy.
    private Map<Class<?>, Object> serviceMap = new HashMap<Class<?>, Object>();
    private Timer timer = new Timer();
    private MHost mhost;            // database row describing this management host
    private Properties properties;  // startup properties from cloud-bridge.properties
    private boolean useSubDomain = false; // use DNS sub domain for bucket name
    private String serviceEndpoint = null;
    private String multipartDir = null; // illegal bucket name used as a folder for storing multiparts
    private String masterDomain = ".s3.amazonaws.com";
    private S3Engine engine;
    private EC2Engine EC2_engine = null;
    // -> cache Bucket Policies here so we don't have to load from db on every access
    private Map<String,S3BucketPolicy> policyMap = new HashMap<String,S3BucketPolicy>();

    /**
     * Builds the S3/EC2 engines and registers the SOAP service implementations.
     *
     * @throws IOException propagated from engine construction
     */
    protected ServiceProvider() throws IOException {
        // register service implementation object
        engine = new S3Engine();
        EC2_engine = new EC2Engine();
        serviceMap.put(AmazonS3SkeletonInterface.class, new S3SerializableServiceImplementation(engine));
        serviceMap.put(AmazonEC2SkeletonInterface.class, new EC2SoapServiceImpl(EC2_engine));
    }

    /**
     * Returns the singleton, creating and initializing it on first call inside
     * a persistence session that is committed and closed here. Initialization
     * errors are logged rather than rethrown, so on failure this may return
     * null (constructor failed) or a partially initialized instance
     * (initialize() failed after assignment).
     */
    public synchronized static ServiceProvider getInstance() {
        if(instance == null)
        {
            try {
                instance = new ServiceProvider();
                instance.initialize();
                PersistContext.commitTransaction();
            } catch(Throwable e) {
                logger.error("Unexpected exception " + e.getMessage(), e);
            } finally {
                PersistContext.closeSession();
            }
        }
        return instance;
    }

    /** Returns the database id of this management host, or 0 if not yet registered. */
    public long getManagementHostId() {
        // we want to limit mhost within its own session, id of the value will be returned
        long mhostId = 0;
        if(mhost != null)
            mhostId = mhost.getId() != null ? mhost.getId().longValue() : 0L;
        return mhostId;
    }

    /**
     * We return a 2-tuple to distinguish between two cases:
     * (1) there is no entry in the map for bucketName, and (2) there is a null entry
     * in the map for bucketName. In case 2, the database was inspected for the
     * bucket policy but it had none so we cache it here to reduce database lookups.
     * @param bucketName
     * @return Integer in the tuple means: -1 if no policy defined for the bucket, 0 if one defined
     *         even if it is set at null.
     */
    public OrderedPair<S3BucketPolicy,Integer> getBucketPolicy(String bucketName) {
        if (policyMap.containsKey( bucketName )) {
            S3BucketPolicy policy = policyMap.get( bucketName );
            return new OrderedPair<S3BucketPolicy,Integer>( policy, 0 );
        }
        else return new OrderedPair<S3BucketPolicy,Integer>( null, -1 ); // For case (1) where the map has no entry for bucketName
    }

    /**
     * The policy parameter can be set to null, which means that there is no policy
     * for the bucket so a database lookup is not necessary.
     *
     * @param bucketName
     * @param policy
     */
    public void setBucketPolicy(String bucketName, S3BucketPolicy policy) {
        policyMap.put(bucketName, policy);
    }

    /** Removes any cached policy entry for the bucket (including cached "no policy"). */
    public void deleteBucketPolicy(String bucketName) {
        policyMap.remove(bucketName);
    }

    public S3Engine getS3Engine() {
        return engine;
    }

    public EC2Engine getEC2Engine() {
        return EC2_engine;
    }

    public String getMasterDomain() {
        return masterDomain;
    }

    public boolean getUseSubDomain() {
        return useSubDomain;
    }

    public String getServiceEndpoint() {
        return serviceEndpoint;
    }

    public String getMultipartDir() {
        return multipartDir;
    }

    public Properties getStartupProperties() {
        return properties;
    }

    /**
     * Looks up the user's secret key by access key and packages both into a
     * UserInfo.
     *
     * @param accessKey the S3 access key presented by the request
     * @return the populated UserInfo, or null if the access key is unknown
     */
    public UserInfo getUserInfo(String accessKey)
        throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException {
        UserInfo info = new UserInfo();
        UserCredentialsDao credentialDao = new UserCredentialsDao();
        UserCredentials cloudKeys = credentialDao.getByAccessKey( accessKey );
        if ( null == cloudKeys ) {
            logger.debug( accessKey + " is not defined in the S3 service - call SetUserKeys" );
            return null;
        } else {
            info.setAccessKey( accessKey );
            info.setSecretKey( cloudKeys.getSecretKey());
            info.setCanonicalUserId(accessKey);
            info.setDescription( "S3 REST request" );
            return info;
        }
    }

    /**
     * One-time startup: configures log4j, loads properties, derives the host
     * key, registers this host and (optionally) local storage in the database,
     * and starts the heartbeat timer.
     */
    protected void initialize() {
        if(logger.isInfoEnabled())
            logger.info("Initializing ServiceProvider...");
        File file = ConfigurationHelper.findConfigurationFile("log4j-cloud-bridge.xml");
        if(file != null) {
            System.out.println("Log4j configuration from : " + file.getAbsolutePath());
            DOMConfigurator.configureAndWatch(file.getAbsolutePath(), 10000);
        } else {
            System.out.println("Configure log4j with default properties");
        }
        loadStartupProperties();
        String hostKey = properties.getProperty("host.key");
        if(hostKey == null) {
            // Fall back to the MAC address of the first non-loopback interface.
            InetAddress inetAddr = NetHelper.getFirstNonLoopbackLocalInetAddress();
            if(inetAddr != null)
                hostKey = NetHelper.getMacAddress(inetAddr);
        }
        if(hostKey == null)
            // NOTE(review): message typo "properites" — candidate for a fix.
            throw new ConfigurationException("Please configure host.key property in cloud-bridge.properites");
        String host = properties.getProperty("host");
        if(host == null)
            host = NetHelper.getHostName();
        if(properties.get("bucket.dns") != null &&
            ((String)properties.get("bucket.dns")).equalsIgnoreCase("true")) {
            useSubDomain = true;
        }
        serviceEndpoint = (String)properties.get("serviceEndpoint");
        masterDomain = new String( "." + serviceEndpoint );
        setupHost(hostKey, host);
        // we will commit and start a new transaction to allow host info be flushed to DB
        PersistContext.flush();
        String localStorageRoot = properties.getProperty("storage.root");
        if (localStorageRoot != null) setupLocalStorage(localStorageRoot);
        multipartDir = properties.getProperty("storage.multipartDir");
        timer.schedule(getHeartbeatTask(), HEARTBEAT_INTERVAL, HEARTBEAT_INTERVAL);
        if(logger.isInfoEnabled())
            logger.info("ServiceProvider initialized");
    }

    /** Loads cloud-bridge.properties; on failure leaves an empty Properties object. */
    private void loadStartupProperties() {
        File propertiesFile = ConfigurationHelper.findConfigurationFile("cloud-bridge.properties");
        properties = new Properties();
        if(propertiesFile != null) {
            try {
                properties.load(new FileInputStream(propertiesFile));
            } catch (FileNotFoundException e) {
                logger.warn("Unable to open properties file: " + propertiesFile.getAbsolutePath(), e);
            } catch (IOException e) {
                logger.warn("Unable to read properties file: " + propertiesFile.getAbsolutePath(), e);
            }
            logger.info("Use startup properties file: " + propertiesFile.getAbsolutePath());
        } else {
            if(logger.isInfoEnabled())
                logger.info("Startup properties is not found.");
        }
    }

    /** Builds the periodic task that refreshes this host's heartbeat timestamp. */
    private TimerTask getHeartbeatTask() {
        return new TimerTask() {
            @Override
            public void run() {
                try {
                    MHostDao mhostDao = new MHostDao();
                    mhost.setLastHeartbeatTime(DateHelper.currentGMTTime());
                    mhostDao.update(mhost);
                    PersistContext.commitTransaction();
                } catch(Throwable e){
                    logger.error("Unexpected exception " + e.getMessage(), e);
                } finally {
                    PersistContext.closeSession();
                }
            }
        };
    }

    /** Inserts or refreshes this management host's row, keyed by hostKey. */
    private void setupHost(String hostKey, String host) {
        MHostDao mhostDao = new MHostDao();
        mhost = mhostDao.getByHostKey(hostKey);
        if(mhost == null) {
            mhost = new MHost();
            mhost.setHostKey(hostKey);
            mhost.setHost(host);
            mhost.setLastHeartbeatTime(DateHelper.currentGMTTime());
            mhostDao.save(mhost);
        } else {
            mhost.setHost(host);
            mhostDao.update(mhost);
        }
    }

    /** Registers a local storage host rooted at storageRoot, if not already present. */
    private void setupLocalStorage(String storageRoot) {
        SHostDao shostDao = new SHostDao();
        SHost shost = shostDao.getLocalStorageHost(mhost.getId(), storageRoot);
        if(shost == null) {
            shost = new SHost();
            shost.setMhost(mhost);
            mhost.getLocalSHosts().add(shost);
            shost.setHostType(SHost.STORAGE_HOST_TYPE_LOCAL);
            shost.setHost(NetHelper.getHostName());
            shost.setExportRoot(storageRoot);
            PersistContext.getSession().save(shost);
        }
    }

    /** Stops the heartbeat timer. */
    public void shutdown() {
        timer.cancel();
        if(logger.isInfoEnabled())
            logger.info("ServiceProvider stopped");
    }

    /**
     * Wraps a service object in a dynamic proxy that commits the persistence
     * transaction after each successful call and always closes the session.
     * PersistException/SessionException are swallowed silently; other failures
     * are unwrapped to an AxisFault when reflection enveloped one, else
     * logged and rethrown.
     */
    @SuppressWarnings("unchecked")
    private static <T> T getProxy(Class<?> serviceInterface, final T serviceObject) {
        return (T) Proxy.newProxyInstance(serviceObject.getClass().getClassLoader(),
            new Class[] { serviceInterface },
            new InvocationHandler() {
                public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
                    Object result = null;
                    try {
                        result = method.invoke(serviceObject, args);
                        PersistContext.commitTransaction();
                    } catch (PersistException e) {
                        // NOTE(review): intentionally swallowed? — consider logging.
                    } catch (SessionException e) {
                        // NOTE(review): intentionally swallowed? — consider logging.
                    } catch(Throwable e) {
                        // Rethrow the exception to Axis:
                        // Check if the exception is an AxisFault or a RuntimeException
                        // enveloped AxisFault and if so, pass it on as such. Otherwise
                        // log to help debugging and throw as is.
                        if (e.getCause() != null && e.getCause() instanceof AxisFault)
                            throw e.getCause();
                        else if (e.getCause() != null && e.getCause().getCause() != null
                                && e.getCause().getCause() instanceof AxisFault)
                            throw e.getCause().getCause();
                        else {
                            logger.warn("Unhandled exception " + e.getMessage(), e);
                            throw e;
                        }
                    } finally {
                        PersistContext.closeSession();
                    }
                    return result;
                }
            });
    }

    /** Returns the registered implementation of serviceInterface wrapped in the transaction proxy. */
    @SuppressWarnings("unchecked")
    public <T> T getServiceImpl(Class<?> serviceInterface) {
        return getProxy(serviceInterface, (T)serviceMap.get(serviceInterface));
    }
}
/**
* Copyright (C) 2011 Citrix Systems, Inc. All rights reserved
*
* This software is licensed under the GNU General Public License v3 or later.
*
* It is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package com.cloud.bridge.service.controller.s3;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.net.InetAddress;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.Timer;
import java.util.TimerTask;
import org.apache.axis2.AxisFault;
import org.apache.log4j.Logger;
import org.apache.log4j.xml.DOMConfigurator;
import org.hibernate.SessionException;
import com.amazon.s3.AmazonS3SkeletonInterface;
import com.amazon.ec2.AmazonEC2SkeletonInterface;
import com.cloud.bridge.model.MHost;
import com.cloud.bridge.model.SHost;
import com.cloud.bridge.model.UserCredentials;
import com.cloud.bridge.persist.PersistContext;
import com.cloud.bridge.persist.PersistException;
import com.cloud.bridge.persist.dao.MHostDao;
import com.cloud.bridge.persist.dao.SHostDao;
import com.cloud.bridge.persist.dao.UserCredentialsDao;
import com.cloud.bridge.service.EC2SoapServiceImpl;
import com.cloud.bridge.service.UserInfo;
import com.cloud.bridge.service.core.ec2.EC2Engine;
import com.cloud.bridge.service.core.s3.S3BucketPolicy;
import com.cloud.bridge.service.core.s3.S3Engine;
import com.cloud.bridge.service.exception.ConfigurationException;
import com.cloud.bridge.util.ConfigurationHelper;
import com.cloud.bridge.util.DateHelper;
import com.cloud.bridge.util.NetHelper;
import com.cloud.bridge.util.OrderedPair;
/**
* @author Kelven Yang
*/
/**
 * Singleton bootstrap object for the cloud bridge: wires up the S3 and EC2
 * engines, registers their SOAP skeleton implementations, loads startup
 * properties, records this management host in the database, and runs a
 * periodic heartbeat. Also caches S3 bucket policies to avoid repeated
 * database lookups.
 *
 * @author Kelven Yang
 */
public class ServiceProvider {
    protected final static Logger logger = Logger.getLogger(ServiceProvider.class);

    /** Interval in milliseconds between heartbeat updates of this host's DB row. */
    public final static long HEARTBEAT_INTERVAL = 10000;

    // Lazily created by getInstance(); guarded by the synchronized accessor.
    private static ServiceProvider instance;

    // SOAP skeleton interface -> implementation; consumers obtain these through
    // getServiceImpl(), which wraps them in a transaction-managing proxy.
    private Map<Class<?>, Object> serviceMap = new HashMap<Class<?>, Object>();
    private Timer timer = new Timer();
    private MHost mhost;            // database row describing this management host
    private Properties properties;  // startup properties from cloud-bridge.properties
    private boolean useSubDomain = false; // use DNS sub domain for bucket name
    private String serviceEndpoint = null;
    private String multipartDir = null; // illegal bucket name used as a folder for storing multiparts
    private String masterDomain = ".s3.amazonaws.com";
    private S3Engine engine;
    private EC2Engine EC2_engine = null;
    // -> cache Bucket Policies here so we don't have to load from db on every access
    private Map<String,S3BucketPolicy> policyMap = new HashMap<String,S3BucketPolicy>();

    /**
     * Builds the S3/EC2 engines and registers the SOAP service implementations.
     *
     * @throws IOException propagated from engine construction
     */
    protected ServiceProvider() throws IOException {
        // register service implementation object
        engine = new S3Engine();
        EC2_engine = new EC2Engine();
        serviceMap.put(AmazonS3SkeletonInterface.class, new S3SerializableServiceImplementation(engine));
        serviceMap.put(AmazonEC2SkeletonInterface.class, new EC2SoapServiceImpl(EC2_engine));
    }

    /**
     * Returns the singleton, creating and initializing it on first call inside
     * a persistence session that is committed and closed here. Initialization
     * errors are logged rather than rethrown, so on failure this may return
     * null (constructor failed) or a partially initialized instance
     * (initialize() failed after assignment).
     */
    public synchronized static ServiceProvider getInstance() {
        if(instance == null)
        {
            try {
                instance = new ServiceProvider();
                instance.initialize();
                PersistContext.commitTransaction();
            } catch(Throwable e) {
                logger.error("Unexpected exception " + e.getMessage(), e);
            } finally {
                PersistContext.closeSession();
            }
        }
        return instance;
    }

    /** Returns the database id of this management host, or 0 if not yet registered. */
    public long getManagementHostId() {
        // we want to limit mhost within its own session, id of the value will be returned
        long mhostId = 0;
        if(mhost != null)
            mhostId = mhost.getId() != null ? mhost.getId().longValue() : 0L;
        return mhostId;
    }

    /**
     * We return a 2-tuple to distinguish between two cases:
     * (1) there is no entry in the map for bucketName, and (2) there is a null entry
     * in the map for bucketName. In case 2, the database was inspected for the
     * bucket policy but it had none so we cache it here to reduce database lookups.
     * @param bucketName
     * @return Integer in the tuple means: -1 if no policy defined for the bucket, 0 if one defined
     *         even if it is set at null.
     */
    public OrderedPair<S3BucketPolicy,Integer> getBucketPolicy(String bucketName) {
        if (policyMap.containsKey( bucketName )) {
            S3BucketPolicy policy = policyMap.get( bucketName );
            return new OrderedPair<S3BucketPolicy,Integer>( policy, 0 );
        }
        else return new OrderedPair<S3BucketPolicy,Integer>( null, -1 ); // For case (1) where the map has no entry for bucketName
    }

    /**
     * The policy parameter can be set to null, which means that there is no policy
     * for the bucket so a database lookup is not necessary.
     *
     * @param bucketName
     * @param policy
     */
    public void setBucketPolicy(String bucketName, S3BucketPolicy policy) {
        policyMap.put(bucketName, policy);
    }

    /** Removes any cached policy entry for the bucket (including cached "no policy"). */
    public void deleteBucketPolicy(String bucketName) {
        policyMap.remove(bucketName);
    }

    public S3Engine getS3Engine() {
        return engine;
    }

    public EC2Engine getEC2Engine() {
        return EC2_engine;
    }

    public String getMasterDomain() {
        return masterDomain;
    }

    public boolean getUseSubDomain() {
        return useSubDomain;
    }

    public String getServiceEndpoint() {
        return serviceEndpoint;
    }

    public String getMultipartDir() {
        return multipartDir;
    }

    public Properties getStartupProperties() {
        return properties;
    }

    /**
     * Looks up the user's secret key by access key and packages both into a
     * UserInfo.
     *
     * @param accessKey the S3 access key presented by the request
     * @return the populated UserInfo, or null if the access key is unknown
     */
    public UserInfo getUserInfo(String accessKey)
        throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException {
        UserInfo info = new UserInfo();
        UserCredentialsDao credentialDao = new UserCredentialsDao();
        UserCredentials cloudKeys = credentialDao.getByAccessKey( accessKey );
        if ( null == cloudKeys ) {
            logger.debug( accessKey + " is not defined in the S3 service - call SetUserKeys" );
            return null;
        } else {
            info.setAccessKey( accessKey );
            info.setSecretKey( cloudKeys.getSecretKey());
            info.setCanonicalUserId(accessKey);
            info.setDescription( "S3 REST request" );
            return info;
        }
    }

    /**
     * One-time startup: configures log4j, loads properties, derives the host
     * key, registers this host and (optionally) local storage in the database,
     * and starts the heartbeat timer.
     */
    protected void initialize() {
        if(logger.isInfoEnabled())
            logger.info("Initializing ServiceProvider...");
        File file = ConfigurationHelper.findConfigurationFile("log4j-cloud-bridge.xml");
        if(file != null) {
            System.out.println("Log4j configuration from : " + file.getAbsolutePath());
            DOMConfigurator.configureAndWatch(file.getAbsolutePath(), 10000);
        } else {
            System.out.println("Configure log4j with default properties");
        }
        loadStartupProperties();
        String hostKey = properties.getProperty("host.key");
        if(hostKey == null) {
            // Fall back to the MAC address of the first non-loopback interface.
            InetAddress inetAddr = NetHelper.getFirstNonLoopbackLocalInetAddress();
            if(inetAddr != null)
                hostKey = NetHelper.getMacAddress(inetAddr);
        }
        if(hostKey == null)
            // NOTE(review): message typo "properites" — candidate for a fix.
            throw new ConfigurationException("Please configure host.key property in cloud-bridge.properites");
        String host = properties.getProperty("host");
        if(host == null)
            host = NetHelper.getHostName();
        if(properties.get("bucket.dns") != null &&
            ((String)properties.get("bucket.dns")).equalsIgnoreCase("true")) {
            useSubDomain = true;
        }
        serviceEndpoint = (String)properties.get("serviceEndpoint");
        masterDomain = new String( "." + serviceEndpoint );
        setupHost(hostKey, host);
        // we will commit and start a new transaction to allow host info be flushed to DB
        PersistContext.flush();
        String localStorageRoot = properties.getProperty("storage.root");
        if (localStorageRoot != null) setupLocalStorage(localStorageRoot);
        multipartDir = properties.getProperty("storage.multipartDir");
        timer.schedule(getHeartbeatTask(), HEARTBEAT_INTERVAL, HEARTBEAT_INTERVAL);
        if(logger.isInfoEnabled())
            logger.info("ServiceProvider initialized");
    }

    /** Loads cloud-bridge.properties; on failure leaves an empty Properties object. */
    private void loadStartupProperties() {
        File propertiesFile = ConfigurationHelper.findConfigurationFile("cloud-bridge.properties");
        properties = new Properties();
        if(propertiesFile != null) {
            try {
                properties.load(new FileInputStream(propertiesFile));
            } catch (FileNotFoundException e) {
                logger.warn("Unable to open properties file: " + propertiesFile.getAbsolutePath(), e);
            } catch (IOException e) {
                logger.warn("Unable to read properties file: " + propertiesFile.getAbsolutePath(), e);
            }
            logger.info("Use startup properties file: " + propertiesFile.getAbsolutePath());
        } else {
            if(logger.isInfoEnabled())
                logger.info("Startup properties is not found.");
        }
    }

    /** Builds the periodic task that refreshes this host's heartbeat timestamp. */
    private TimerTask getHeartbeatTask() {
        return new TimerTask() {
            @Override
            public void run() {
                try {
                    MHostDao mhostDao = new MHostDao();
                    mhost.setLastHeartbeatTime(DateHelper.currentGMTTime());
                    mhostDao.update(mhost);
                    PersistContext.commitTransaction();
                } catch(Throwable e){
                    logger.error("Unexpected exception " + e.getMessage(), e);
                } finally {
                    PersistContext.closeSession();
                }
            }
        };
    }

    /** Inserts or refreshes this management host's row, keyed by hostKey. */
    private void setupHost(String hostKey, String host) {
        MHostDao mhostDao = new MHostDao();
        mhost = mhostDao.getByHostKey(hostKey);
        if(mhost == null) {
            mhost = new MHost();
            mhost.setHostKey(hostKey);
            mhost.setHost(host);
            mhost.setLastHeartbeatTime(DateHelper.currentGMTTime());
            mhostDao.save(mhost);
        } else {
            mhost.setHost(host);
            mhostDao.update(mhost);
        }
    }

    /** Registers a local storage host rooted at storageRoot, if not already present. */
    private void setupLocalStorage(String storageRoot) {
        SHostDao shostDao = new SHostDao();
        SHost shost = shostDao.getLocalStorageHost(mhost.getId(), storageRoot);
        if(shost == null) {
            shost = new SHost();
            shost.setMhost(mhost);
            mhost.getLocalSHosts().add(shost);
            shost.setHostType(SHost.STORAGE_HOST_TYPE_LOCAL);
            shost.setHost(NetHelper.getHostName());
            shost.setExportRoot(storageRoot);
            PersistContext.getSession().save(shost);
        }
    }

    /** Stops the heartbeat timer. */
    public void shutdown() {
        timer.cancel();
        if(logger.isInfoEnabled())
            logger.info("ServiceProvider stopped");
    }

    /**
     * Wraps a service object in a dynamic proxy that commits the persistence
     * transaction after each successful call and always closes the session.
     * PersistException/SessionException are swallowed silently; other failures
     * are unwrapped to an AxisFault when reflection enveloped one, else
     * logged and rethrown.
     */
    @SuppressWarnings("unchecked")
    private static <T> T getProxy(Class<?> serviceInterface, final T serviceObject) {
        return (T) Proxy.newProxyInstance(serviceObject.getClass().getClassLoader(),
            new Class[] { serviceInterface },
            new InvocationHandler() {
                public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
                    Object result = null;
                    try {
                        result = method.invoke(serviceObject, args);
                        PersistContext.commitTransaction();
                    } catch (PersistException e) {
                        // NOTE(review): intentionally swallowed? — consider logging.
                    } catch (SessionException e) {
                        // NOTE(review): intentionally swallowed? — consider logging.
                    } catch(Throwable e) {
                        // Rethrow the exception to Axis:
                        // Check if the exception is an AxisFault or a RuntimeException
                        // enveloped AxisFault and if so, pass it on as such. Otherwise
                        // log to help debugging and throw as is.
                        if (e.getCause() != null && e.getCause() instanceof AxisFault)
                            throw e.getCause();
                        else if (e.getCause() != null && e.getCause().getCause() != null
                                && e.getCause().getCause() instanceof AxisFault)
                            throw e.getCause().getCause();
                        else {
                            logger.warn("Unhandled exception " + e.getMessage(), e);
                            throw e;
                        }
                    } finally {
                        PersistContext.closeSession();
                    }
                    return result;
                }
            });
    }

    /** Returns the registered implementation of serviceInterface wrapped in the transaction proxy. */
    @SuppressWarnings("unchecked")
    public <T> T getServiceImpl(Class<?> serviceInterface) {
        return getProxy(serviceInterface, (T)serviceMap.get(serviceInterface));
    }
}

File diff suppressed because it is too large Load Diff

View File

@ -1,84 +1,84 @@
/*
* Copyright (C) 2011 Citrix Systems, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloud.bridge.service.core.s3;
import java.util.List;
import com.cloud.bridge.model.SAcl;
import com.cloud.bridge.model.SBucket;
import com.cloud.bridge.service.exception.UnsupportedException;
/**
* @author Kelven Yang, John Zucker
* Each relation holds
* a grantee - which is one of SAcl.GRANTEE_USER, SAcl.GRANTEE_ALLUSERS, SAcl.GRANTEE_AUTHENTICATED
* a permission - which is one of SAcl.PERMISSION_PASS, SAcl.PERMISSION_NONE, SAcl.PERMISSION_READ,
* SAcl.PERMISSION_WRITE, SAcl.PERMISSION_READ_ACL, SAcl.PERMISSION_WRITE_ACL, SAcl.PERMISSION_FULL
* canonicalUserID
*/
/**
 * One ACL relation attached to a bucket or object.  Each grant carries:
 * a grantee type (SAcl.GRANTEE_USER, SAcl.GRANTEE_ALLUSERS or
 * SAcl.GRANTEE_AUTHENTICATED), a permission code (SAcl.PERMISSION_* values)
 * and the canonical user id of the grantee.
 */
public class S3Grant {
    private int grantee;             // one of the SAcl.GRANTEE_* constants
    private int permission;          // one of the SAcl.PERMISSION_* constants
    private String canonicalUserID;  // canonical id of the grantee

    public S3Grant() {
    }

    public int getGrantee() {
        return grantee;
    }

    public void setGrantee(int grantee) {
        this.grantee = grantee;
    }

    public int getPermission() {
        return permission;
    }

    public void setPermission(int permission) {
        this.permission = permission;
    }

    public String getCanonicalUserID() {
        return canonicalUserID;
    }

    public void setCanonicalUserID(String canonicalUserID) {
        this.canonicalUserID = canonicalUserID;
    }

    /**
     * Converts persisted ACL rows into S3Grant value objects, preserving order.
     * Used by the S3 engine when answering ACL policy requests for buckets
     * and objects.
     *
     * @param grants persisted ACL entries, may be null
     * @return one S3Grant per SAcl row, or null when the input list is null
     */
    public static S3Grant[] toGrants(List<SAcl> grants) {
        if (null == grants) return null;

        S3Grant[] result = new S3Grant[grants.size()];
        int slot = 0;
        for (SAcl acl : grants) {
            S3Grant entry = new S3Grant();
            entry.setGrantee(acl.getGranteeType());
            entry.setCanonicalUserID(acl.getGranteeCanonicalId());
            entry.setPermission(acl.getPermission());
            result[slot++] = entry;
        }
        return result;
    }
}
/*
* Copyright (C) 2011 Citrix Systems, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloud.bridge.service.core.s3;
import java.util.List;
import com.cloud.bridge.model.SAcl;
import com.cloud.bridge.model.SBucket;
import com.cloud.bridge.service.exception.UnsupportedException;
/**
* @author Kelven Yang, John Zucker
* Each relation holds
* a grantee - which is one of SAcl.GRANTEE_USER, SAcl.GRANTEE_ALLUSERS, SAcl.GRANTEE_AUTHENTICATED
* a permission - which is one of SAcl.PERMISSION_PASS, SAcl.PERMISSION_NONE, SAcl.PERMISSION_READ,
* SAcl.PERMISSION_WRITE, SAcl.PERMISSION_READ_ACL, SAcl.PERMISSION_WRITE_ACL, SAcl.PERMISSION_FULL
* canonicalUserID
*/
/**
 * One ACL relation attached to a bucket or object: a grantee type
 * (SAcl.GRANTEE_USER, SAcl.GRANTEE_ALLUSERS, SAcl.GRANTEE_AUTHENTICATED),
 * a permission code (SAcl.PERMISSION_* values) and the grantee's canonical
 * user id.
 */
public class S3Grant {
    private int grantee;     // SAcl.GRANTEE_USER etc
    private int permission;  // SAcl.PERMISSION_READ etc
    private String canonicalUserID;  // canonical id of the grantee

    public S3Grant() {
    }

    public int getGrantee() {
        return grantee;
    }

    public void setGrantee(int grantee) {
        this.grantee = grantee;
    }

    public int getPermission() {
        return permission;
    }

    public void setPermission(int permission) {
        this.permission = permission;
    }

    public String getCanonicalUserID() {
        return canonicalUserID;
    }

    public void setCanonicalUserID(String canonicalUserID) {
        this.canonicalUserID = canonicalUserID;
    }

    /* Return an array of S3Grants holding the permissions of grantees by grantee type and their canonicalUserIds.
     * Used by S3 engine to get ACL policy requests for buckets and objects.
     * Returns null when the input list is null; order of the rows is preserved.
     */
    public static S3Grant[] toGrants(List<SAcl> grants) {
        if(grants != null)
        {
            S3Grant[] entries = new S3Grant[grants.size()];
            int i = 0;
            for(SAcl acl: grants) {
                entries[i] = new S3Grant();
                entries[i].setGrantee(acl.getGranteeType());
                entries[i].setCanonicalUserID(acl.getGranteeCanonicalId());
                entries[i].setPermission(acl.getPermission());
                i++;
            }
            return entries;
        }
        return null;
    }
}

View File

@ -1,58 +1,58 @@
/*
* Copyright (C) 2011 Citrix Systems, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloud.bridge.service.core.s3;
import java.util.Calendar;
import java.util.TimeZone;
/**
* @author Kelven Yang
*/
/**
 * Value object for one bucket entry in a ListAllMyBuckets response:
 * the bucket name plus its creation timestamp.  Serialization of the
 * timestamp (e.g. to ISO 8601) is the responsibility of the response
 * writer, not this bean.
 */
public class S3ListAllMyBucketsEntry {
    private String name;            // bucket name
    private Calendar creationDate;  // bucket creation time

    public S3ListAllMyBucketsEntry() {
    }

    /** @return the bucket name */
    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    /** @return the bucket creation time as stored; no formatting is applied here */
    public Calendar getCreationDate() {
        return creationDate;
    }

    public void setCreationDate(Calendar creationDate) {
        this.creationDate = creationDate;
    }
}
/*
* Copyright (C) 2011 Citrix Systems, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloud.bridge.service.core.s3;
import java.util.Calendar;
import java.util.TimeZone;
/**
* @author Kelven Yang
*/
/**
 * Value object for one bucket entry in a ListAllMyBuckets response:
 * the bucket name plus its creation timestamp.
 */
public class S3ListAllMyBucketsEntry {
    private String name;            // bucket name
    private Calendar creationDate;  // bucket creation time

    public S3ListAllMyBucketsEntry() {
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public Calendar getCreationDate() {
        // Dead experiments with ISO 8601 formatting were left in place below;
        // serialization is handled by the response writer, not this bean.
        // cal.setTimeZone(TimeZone.getTimeZone("Z"));
        // java.util.Date d = cal.getTime();
        // java.util.Date d = creationDate.getTime();
        // com.cloud.bridge.util.ISO8601SimpleDateTimeFormat sdf = new com.cloud.bridge.util.ISO8601SimpleDateTimeFormat();
        // sdf.format(d);
        // java.lang.StringBuffer b = com.cloud.bridge.util.ISO8601SimpleDateTimeFormat.format(d); return b;
        return creationDate;
    }

    public void setCreationDate(Calendar creationDate) {
        this.creationDate = creationDate;
    }
}

View File

@ -1,111 +1,111 @@
/*
* Copyright (C) 2011 Citrix Systems, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloud.bridge.service.core.s3;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import javax.activation.DataHandler;
/**
* @author Kelven Yang, John Zucker
*/
/**
 * Request bean for an inline (SOAP body embedded) S3 PutObject call.
 * The payload is either carried in {@code data} (DIME/MTOM attachment)
 * or in {@code dataAsString} (literal string in the SOAP body); exactly
 * one of the two is expected to be set by the parser.
 */
public class S3PutObjectInlineRequest extends S3Request {
    protected String bucketName;            // target bucket
    protected String key;                   // target object key
    protected long contentLength;           // declared payload length in bytes
    protected S3MetaDataEntry[] metaEntries;        // user metadata headers
    protected S3AccessControlList acl;              // explicit ACL grants, if any
    protected String cannedAccessPolicy;    // Canned ACLs are public-read, public-read-write, private, authenticated-read or log-delivery-write
    protected DataHandler data;             // attachment payload (null when inline string is used)
    protected String dataAsString;          // inline string payload

    public S3PutObjectInlineRequest() {
        super();
        data = null;
    }

    public String getBucketName() {
        return bucketName;
    }

    public void setBucketName(String bucketName) {
        this.bucketName = bucketName;
    }

    public String getKey() {
        return key;
    }

    public void setKey(String key) {
        this.key = key;
    }

    public long getContentLength() {
        return contentLength;
    }

    public void setContentLength(long contentLength) {
        this.contentLength = contentLength;
    }

    public S3MetaDataEntry[] getMetaEntries() {
        return metaEntries;
    }

    public void setMetaEntries(S3MetaDataEntry[] metaEntries) {
        this.metaEntries = metaEntries;
    }

    public S3AccessControlList getAcl() {
        return acl;
    }

    public void setAcl(S3AccessControlList acl) {
        this.acl = acl;
    }

    public String getCannedAccess() {
        return cannedAccessPolicy;
    }

    public void setCannedAccess(String cannedAccessPolicy) {
        this.cannedAccessPolicy = cannedAccessPolicy;
    }

    public DataHandler getData() {
        return data;
    }

    public void setData(DataHandler data) {
        this.data = data;
    }

    public void setDataAsString( String data ) {
        this.dataAsString = data;
    }

    /**
     * Opens the payload as a stream: the attachment's stream when one was
     * supplied, otherwise the inline string decoded to bytes.
     *
     * @return a fresh input stream over the request payload
     * @throws IOException if the attachment stream cannot be opened
     */
    public InputStream getDataInputStream() throws IOException
    {
        if ( null == data )
        {
            // Use an explicit charset so the produced bytes do not vary with
            // the JVM's platform default encoding (SOAP payloads are UTF-8).
            ByteArrayInputStream bs = new ByteArrayInputStream( dataAsString.getBytes("UTF-8"));
            return bs;
        }
        else return data.getInputStream();
    }
}
/*
* Copyright (C) 2011 Citrix Systems, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloud.bridge.service.core.s3;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import javax.activation.DataHandler;
/**
* @author Kelven Yang, John Zucker
*/
/**
 * Request bean for an inline (SOAP body embedded) S3 PutObject call.
 * The payload is either carried in {@code data} (attachment) or in
 * {@code dataAsString} (literal string in the SOAP body).
 */
public class S3PutObjectInlineRequest extends S3Request {
    protected String bucketName;            // target bucket
    protected String key;                   // target object key
    protected long contentLength;           // declared payload length in bytes
    protected S3MetaDataEntry[] metaEntries;        // user metadata headers
    protected S3AccessControlList acl;              // explicit ACL grants, if any
    protected String cannedAccessPolicy; // Canned ACLs are public-read, public-read-write, private, authenticated-read or log-delivery-write
    protected DataHandler data;             // attachment payload (null when inline string is used)
    protected String dataAsString;          // inline string payload

    public S3PutObjectInlineRequest() {
        super();
        data = null;
    }

    public String getBucketName() {
        return bucketName;
    }

    public void setBucketName(String bucketName) {
        this.bucketName = bucketName;
    }

    public String getKey() {
        return key;
    }

    public void setKey(String key) {
        this.key = key;
    }

    public long getContentLength() {
        return contentLength;
    }

    public void setContentLength(long contentLength) {
        this.contentLength = contentLength;
    }

    public S3MetaDataEntry[] getMetaEntries() {
        return metaEntries;
    }

    public void setMetaEntries(S3MetaDataEntry[] metaEntries) {
        this.metaEntries = metaEntries;
    }

    public S3AccessControlList getAcl() {
        return acl;
    }

    public void setAcl(S3AccessControlList acl) {
        this.acl = acl;
    }

    public String getCannedAccess() {
        return cannedAccessPolicy;
    }

    public void setCannedAccess(String cannedAccessPolicy) {
        this.cannedAccessPolicy = cannedAccessPolicy;
    }

    public DataHandler getData() {
        return data;
    }

    public void setData(DataHandler data) {
        this.data = data;
    }

    public void setDataAsString( String data ) {
        this.dataAsString = data;
    }

    /**
     * Opens the payload as a stream: the attachment's stream when one was
     * supplied, otherwise the inline string decoded to bytes.
     * NOTE(review): getBytes() uses the JVM's platform default charset —
     * presumably the payload is ASCII/UTF-8; confirm before relying on
     * non-ASCII content here.
     *
     * @throws IOException if the attachment stream cannot be opened
     */
    public InputStream getDataInputStream() throws IOException
    {
        if ( null == data )
        {
            ByteArrayInputStream bs = new ByteArrayInputStream( dataAsString.getBytes());
            return bs;
        }
        else return data.getInputStream();
    }
}

View File

@ -1,67 +1,67 @@
/*
* Copyright (C) 2011 Citrix Systems, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloud.bridge.service.core.s3;
import java.util.Calendar;
/**
* @author Kelven Yang, John Zucker
*/
/**
 * Response bean for an inline S3 PutObject call.  Carries the ETag
 * (lowercase Base64 MD5 of the stored content, set whenever an object is
 * uploaded or updated), the last-modified timestamp, the version id when
 * versioning is enabled, and the multipart upload id (-1 when unused).
 */
public class S3PutObjectInlineResponse extends S3Response {
    protected String ETag;              // lowercase Base64 MD5 of the stored content
    protected Calendar lastModified;    // time the object was stored
    protected String version;           // version id, when bucket versioning applies
    protected int uploadId;             // multipart upload id; -1 means "not a multipart upload"

    public S3PutObjectInlineResponse() {
        super();
        this.uploadId = -1;
    }

    public String getETag() {
        return this.ETag;
    }

    public void setETag(String eTag) {
        ETag = eTag;
    }

    public Calendar getLastModified() {
        return this.lastModified;
    }

    public void setLastModified(Calendar lastModified) {
        this.lastModified = lastModified;
    }

    public String getVersion() {
        return this.version;
    }

    public void setVersion(String version) {
        this.version = version;
    }

    public int getUploadId() {
        return this.uploadId;
    }

    public void setUploadId(int uploadId) {
        this.uploadId = uploadId;
    }
}
/*
* Copyright (C) 2011 Citrix Systems, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloud.bridge.service.core.s3;
import java.util.Calendar;
/**
* @author Kelven Yang, John Zucker
*/
/**
 * Response bean for an inline S3 PutObject call: ETag, last-modified time,
 * version id (when versioning applies) and multipart upload id.
 */
public class S3PutObjectInlineResponse extends S3Response {
    protected String ETag;              // lowercase Base64 MD5 of the stored content
    protected Calendar lastModified;    // time the object was stored
    protected String version;           // version id, when bucket versioning applies
    protected int uploadId;             // multipart upload id; -1 means "not a multipart upload"

    public S3PutObjectInlineResponse() {
        super();
        uploadId = -1;
    }

    // add ETag header computed as Base64 MD5 whenever object is uploaded or updated
    // the Base64 is represented in lowercase
    public String getETag() {
        return ETag;
    }

    public void setETag(String eTag) {
        this.ETag = eTag;
    }

    public Calendar getLastModified() {
        return lastModified;
    }

    public void setLastModified(Calendar lastModified) {
        this.lastModified = lastModified;
    }

    public String getVersion() {
        return version;
    }

    public void setVersion(String version) {
        this.version = version;
    }

    public int getUploadId() {
        return uploadId;
    }

    public void setUploadId(int uploadId) {
        this.uploadId = uploadId;
    }
}

View File

@ -1,617 +0,0 @@
/* Took the basic code from Axis 1.2 and modified to fit into the cloud code base */
/*
* Copyright 2001-2004 The Apache Software Foundation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloud.bridge.util;
import java.io.IOException;
import java.io.InputStream;
import java.io.FilterInputStream;
import org.apache.log4j.Logger;
/**
* This class takes the input stream and turns it multiple streams.
DIME version 0 format
<pre>
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ---
| VERSION |B|E|C| TYPE_T| OPT_T | OPTIONS_LENGTH | A
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| ID_LENGTH | TYPE_LENGTH | Always present 12 bytes
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ even on chunked data.
| DATA_LENGTH | V
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ---
| /
/ OPTIONS + PADDING /
/ (absent for version 0) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| /
/ ID + PADDING /
/ |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| /
/ TYPE + PADDING /
/ |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| /
/ DATA + PADDING /
/ |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
</pre>
* This implementation of input stream does not support marking operations.
*
* Incoming data is DIME encoded when its MIME type is "application/dime".
* Then use this class to pull out 2 streams:
* (1) The first stream is the SOAP request,
* (2) The second stream is a chunked attachment (e.g., a file to store)
*
* The DIME format is defined at this reference:
* http://msdn.microsoft.com/en-us/library/aa480488.aspx
*
* @author Rick Rineholt
*/
/**
 * Splits one DIME-encoded input stream into its successive record streams.
 * Each instance exposes exactly one DIME record (possibly spread over several
 * chunks); getNextStream() hands back a new instance positioned on the record
 * that follows.  The 12-byte fixed header plus OPTIONS/ID/TYPE sections are
 * parsed into instance fields by readHeader(); read() then returns only the
 * record's data payload, transparently crossing chunk boundaries and
 * consuming the 4-byte-alignment padding.  Marking is not supported.
 */
public class DimeDelimitedInputStream extends FilterInputStream
{
    protected final static Logger logger = Logger.getLogger(DimeDelimitedInputStream.class);

    InputStream is = null;          //The source input stream.
    boolean closed = true;          //The stream has been closed.
    boolean theEnd = false;         //There are no more streams left.
    boolean moreChunks = false;     //More chunks are a coming!
    boolean MB = false;             //Message begin flag
    boolean ME = false;             //Message end flag
    String type = null;             // TYPE field of the current record header
    String id = null;               // ID field of the current record header
    String tnf = null;              //DIME type format
    long recordLength = 0L;         //length of the current record.
    long bytesRead = 0L;            //How many bytes of the current record have been read.
    int dataPadLength = 0;          //How many pad bytes there are.
    protected int streamNo = 0;
    protected IOException streamInError = null;
    private static byte[] trash = new byte[4];  // scratch buffer used only to consume padding
    protected static int streamCount = 0;       //number of streams produced.

    // Hands out a unique, monotonically increasing stream number (used for logging).
    protected static synchronized int newStreamNo()
    {
        logger.debug( "streamNo " + (streamCount + 1));
        return ++streamCount;
    }

    /**
     * There can be multiple streams in a DIME encoding. For example, the first
     * stream can be a SOAP message, and the second stream a binary attachment (e.g.,
     * a file). During reading after an EOF is returned, this function should be
     * called to see if there is another stream following the last.
     *
     * @return the dime delimited stream, null if there are no more streams
     * @throws IOException if there was an error loading the data for the next stream
     */
    public synchronized DimeDelimitedInputStream getNextStream() throws IOException
    {
        if (null != streamInError) throw streamInError;
        if (theEnd) return null;

        //Each Stream must be read in succession
        if (bytesRead < recordLength || moreChunks)
            throw new RuntimeException("attach.dimeReadFullyError");

        dataPadLength -= readPad(dataPadLength);

        //Create an new dime stream that comes after this one.
        return new DimeDelimitedInputStream( this.is );
    }

    /**
     * Create a new dime stream.  Reads and parses the first record header
     * eagerly, so construction fails fast on a malformed stream.
     *
     * @param is the <code>InputStream</code> to wrap
     * @throws IOException if anything goes wrong
     */
    public DimeDelimitedInputStream( InputStream is ) throws IOException
    {
        super(null);
        streamNo = newStreamNo();
        closed = false;
        this.is = is;
        readHeader( false );
    }

    /**
     * Make sure to skip the pad which appear in several parts of a DIME message.
     * @param size number of pad bytes expected (0..3)
     * @return the number of pad bytes actually consumed (equals size on success)
     * @throws IOException if the stream ends inside the padding
     */
    private final int readPad( int size ) throws IOException
    {
        if (0 == size) return 0;

        int read = readFromStream(trash, 0, size);
        if (size != read)
        {
            streamInError = new IOException("attach.dimeNotPaddedCorrectly");
            throw streamInError;
        }
        return read;
    }

    // Convenience overload: fill the whole buffer.
    private final int readFromStream( byte[] b ) throws IOException
    {
        return readFromStream( b, 0, b.length );
    }

    // Reads exactly 'length' bytes unless EOF intervenes; loops because a
    // single InputStream.read may return fewer bytes than requested.
    private final int readFromStream( byte[] b, int start, int length ) throws IOException
    {
        int br = 0;
        int brTotal = 0;

        if (length == 0) return 0;
        do
        {   try
            {
                br = is.read( b, brTotal + start, length - brTotal );
            }
            catch (IOException e)
            {
                streamInError = e;
                throw e;
            }
            if (br > 0) brTotal += br;
        }
        while( br > -1 && brTotal < length );
        return br > -1 ? brTotal : br;
    }

    /**
     * Get the id for this stream part.
     * @return the id;
     */
    public String getContentId()
    {
        return id;
    }

    // Type-name-format of the record header: "UNCHANGED", "MIME", "URI" or "UNKNOWN".
    public String getDimeTypeNameFormat()
    {
        return tnf;
    }

    /**
     * Get the type, as read from the header.
     * @return the type of this dime
     */
    public String getType()
    {
        return type;
    }

    /**
     * Read from the DIME stream.
     *
     * @param b is the array to read into.
     * @param off is the offset
     * @return the number of bytes read. -1 if endof stream
     * @throws IOException if data could not be read from the stream
     */
    public synchronized int read( byte[] b, int off, int len ) throws IOException
    {
        if (closed)
        {
            dataPadLength -= readPad(dataPadLength);
            throw new IOException( "streamClosed" );
        }
        return _read( b, off, len );
    }

    // Core read loop shared by read(...) and close(); handles chunk-header
    // transitions, padding removal and end-of-message cleanup.
    protected int _read( byte[] b, int off, int len ) throws IOException
    {
        int totalbytesread = 0;
        int bytes2read = 0;

        if (len < 0)
            throw new IllegalArgumentException( "attach.readLengthError" + len );
        if (off < 0)
            throw new IllegalArgumentException( "attach.readOffsetError" + off );
        if (b == null)
            throw new IllegalArgumentException( "attach.readArrayNullError" );
        if (b.length < off + len)
            throw new IllegalArgumentException("attach.readArraySizeError " + b.length + " " + len + " " + off );
        if (null != streamInError) throw streamInError;
        if (0 == len) return 0;  //quick.

        // odd case no data to read -- give back 0 next time -1;
        if (recordLength == 0 && bytesRead == 0 && !moreChunks)
        {
            ++bytesRead;
            if (ME) finalClose();
            return 0;
        }
        if (bytesRead >= recordLength && !moreChunks)
        {
            dataPadLength -= readPad( dataPadLength );
            if (ME) finalClose();
            return -1;
        }

        // Pull payload bytes, parsing the next chunk header whenever the
        // current chunk is exhausted and more chunks follow.
        do
        {   if (bytesRead >= recordLength && moreChunks) readHeader( true );

            bytes2read = (int) Math.min( recordLength - bytesRead, (long)len - totalbytesread );
            try
            {   bytes2read = is.read( b, off + totalbytesread, bytes2read );
            }
            catch (IOException e)
            {
                streamInError = e;
                throw e;
            }
            if (0 < bytes2read)
            {
                totalbytesread += bytes2read;
                bytesRead += bytes2read;
            }
        }
        while( bytes2read > -1 && totalbytesread < len && (bytesRead < recordLength || moreChunks));

        if ( 0 > bytes2read )
        {
            // Underlying stream hit EOF: decide whether that is premature.
            if (moreChunks)
            {
                streamInError = new IOException("attach.DimeStreamError0");
                throw streamInError;
            }
            if (bytesRead < recordLength)
            {
                streamInError = new IOException("attach.DimeStreamError1 " + (recordLength - bytesRead));
                throw streamInError;
            }
            if (!ME)
            {
                streamInError = new IOException("attach.DimeStreamError0");
                throw streamInError;
            }
            //in theory the last chunk of data should also have been padded, but lets be tolerant of that.
            dataPadLength = 0;
        }
        else if (bytesRead >= recordLength)
        {
            //get rid of pading.
            try
            {   dataPadLength -= readPad( dataPadLength );
            }
            catch (IOException e)
            {
                //in theory the last chunk of data should also have been padded, but lets be tolerant of that.
                if (!ME) throw e;
                else
                {
                    dataPadLength = 0;
                    streamInError = null;
                }
            }
        }

        if (bytesRead >= recordLength && ME) finalClose();

        return totalbytesread >= 0 ? totalbytesread : -1;
    }

    /**
     * The DIME header is read into local class data fields and are not
     * passed as part of the stream data.
     *
     * @param isChunk true when parsing a continuation-chunk header (only the
     *                fixed 12 bytes matter; ID/TYPE are ignored for chunks)
     * @throws IOException on a truncated or malformed header
     */
    protected void readHeader( boolean isChunk ) throws IOException
    {
        bytesRead = 0;   //How many bytes of the record have been read.

        if (isChunk)
        {
            if (!moreChunks) throw new RuntimeException("attach.DimeStreamError2");
            dataPadLength -= readPad(dataPadLength);   //Just in case it was left over.
        }

        byte[] header = new byte[12];
        if (header.length != readFromStream( header) )
        {
            streamInError = new IOException("attach.DimeStreamError3 " + header.length );
            throw streamInError;
        }

        //VERSION
        byte version = (byte) ((header[0] >>> 3) & 0x1f);
        if (version > 1)
        {
            streamInError = new IOException("attach.DimeStreamError4 " + version );
            throw streamInError;
        }

        //B, E, C
        MB = 0 != (0x4 & header[0]);
        ME = 0 != (0x2 & header[0]);
        moreChunks = 0 != (0x1 & header[0]);

        //TYPE_T
        if (!isChunk)
        {
            switch( ((header[1] >>> 4) & (byte)0x0f) ) {
            case 0x00:  tnf = "UNCHANGED"; break;
            case 0x01:  tnf = "MIME";      break;
            case 0x02:  tnf = "URI";       break;
            default:    tnf = "UNKNOWN";   break;
            }
        }

        //OPTIONS_LENGTH
        int optionsLength = ((((int) header[2]) << 8) & 0xff00) | ((int) header[3]);

        //ID_LENGTH
        int idLength = ((((int) header[4]) << 8) & 0xff00) | ((int) header[5]);

        //TYPE_LENGTH
        int typeLength = ((((int) header[6]) << 8) & 0xff00) | ((int) header[7]);

        //DATA_LENGTH
        recordLength = ((((long) header[8] ) << 24) & 0xff000000L) |
                       ((((long) header[9] ) << 16) & 0xff0000L   ) |
                       ((((long) header[10]) << 8 ) & 0xff00L     ) |
                        ((long) header[11]         & 0xffL       );

        //OPTIONS + PADDING
        if (0 != optionsLength)
        {
            byte[] optBytes = new byte[optionsLength];
            if (optionsLength != readFromStream( optBytes ))
            {
                streamInError = new IOException("attach.DimeStreamError5 " + optionsLength );
                throw streamInError;
            }
            optBytes = null;   // throw it away, don't know anything about options.

            int pad = (int) ((4L - (optionsLength & 0x3L)) & 0x03L);
            if (pad != readFromStream( header, 0, pad ))
            {
                streamInError = new IOException("attach.DimeStreamError7");
                throw streamInError;
            }
        }

        // ID + PADDING
        if (0 < idLength)
        {
            byte[] idBytes = new byte[ idLength];
            if (idLength != readFromStream( idBytes ))
            {
                streamInError = new IOException("attach.DimeStreamError8");
                throw streamInError;
            }
            if (idLength != 0 && !isChunk) id = new String(idBytes);

            int pad = (int) ((4L - (idLength & 0x3L)) & 0x03L);
            if (pad != readFromStream( header, 0, pad ))
            {
                streamInError = new IOException("attach.DimeStreamError9");
                throw streamInError;
            }
        }

        //TYPE + PADDING
        if (0 < typeLength)
        {
            byte[] typeBytes = new byte[typeLength];
            if (typeLength != readFromStream( typeBytes ))
            {
                streamInError = new IOException("attach.DimeStreamError10");
                throw streamInError;
            }
            if (typeLength != 0 && !isChunk) type = new String(typeBytes);

            int pad = (int) ((4L - (typeLength & 0x3L)) & 0x03L);
            if (pad != readFromStream( header, 0, pad ))
            {
                streamInError = new IOException("attach.DimeStreamError11");
                throw streamInError;
            }
        }

        logger.debug("MB:" + MB + ", ME:" + ME + ", CF:" + moreChunks +
                     "Option length:" + optionsLength +
                     ", ID length:" + idLength +
                     ", typeLength:" + typeLength + ", TYPE_T:" + tnf);
        logger.debug("id:\"" + id + "\"");
        logger.debug("type:\"" + type + "\"");
        logger.debug("recordlength:\"" + recordLength + "\"");

        // Records are padded out to a 4-byte boundary.
        dataPadLength = (int) ((4L - (recordLength & 0x3L)) & 0x03L);
    }

    /**
     * Read from the delimited stream.
     *
     * @param b is the array to read into. Read as much as possible
     *        into the size of this array.
     * @return the number of bytes read. -1 if endof stream
     * @throws IOException if data could not be read from the stream
     */
    public int read( byte[] b ) throws IOException
    {
        return read( b, 0, b.length );
    }

    // fixme: this seems a bit inefficient
    /**
     * Read from the boundary delimited stream.
     *
     * @return the byte read, or -1 if endof stream
     * @throws IOException if there was an error reading the data
     */
    public int read() throws IOException
    {
        byte[] b = new byte[1];
        int read = read( b, 0, 1 );

        if (read < 0) return -1;   // fixme: should we also check for read != 1?
        return (b[0] & 0xff);      // convert byte value to a positive int
    }

    /**
     * Closes the stream.
     * This will take care of flushing any remaining data to the stream.
     * Multiple calls to this method will result in the stream being closed once
     * and then all subsequent calls being ignored.
     *
     * @throws IOException if the stream could not be closed
     */
    public void close() throws IOException
    {
        synchronized( this )
        {
            if (closed) return;
            closed = true;   //mark it closed.
        }
        logger.debug("bStreamClosed " + streamNo);

        if (bytesRead < recordLength || moreChunks)
        {
            //We need get this off the stream.  Easy way to flush through the stream;
            byte[] readrest = new byte[1024 * 16];
            int bread = 0;
            do
            {   bread = _read( readrest, 0, readrest.length );   //should also close the original stream.
            }
            while( bread > -1 );
        }
        dataPadLength -= readPad( dataPadLength );
    }

    /**
     * Skip n bytes of data in the DIME stream, while reading and processing
     * any headers in the current stream.
     *
     * @param n - number of data bytes to skip
     * @return number of bytes actually skipped
     * @throws IOException
     */
    public long skip( long n ) throws IOException
    {
        long bytesSkipped = 0;
        long bytes2Read = 0;
        byte[] dumpbytes = new byte[1024];

        while( n > 0 )
        {
            bytes2Read = (n > 1024 ? 1024 : n);
            bytes2Read = _read( dumpbytes, 0, (int)bytes2Read );
            n -= bytes2Read;
            bytesSkipped += bytes2Read;
        }
        return bytesSkipped;
    }

    /**
     * Mark the stream. This is not supported.
     */
    public void mark( int readlimit )
    {   //do nothing
    }

    // Unsupported: reset always fails, matching markSupported() == false.
    public void reset() throws IOException
    {
        streamInError = new IOException("attach.bounday.mns");
        throw streamInError;
    }

    public boolean markSupported()
    {
        return false;
    }

    // Bytes readable without blocking; parses the next chunk header in place
    // when the current chunk is exhausted and the full header is buffered.
    public synchronized int available() throws IOException
    {
        if (null != streamInError) throw streamInError;
        int chunkAvail = (int) Math.min((long)Integer.MAX_VALUE, recordLength - bytesRead);
        int streamAvail = 0;

        try
        {   streamAvail = is.available();
        }
        catch( IOException e )
        {
            streamInError = e;
            throw e;
        }

        if (chunkAvail == 0 && moreChunks && (12 + dataPadLength) <= streamAvail)
        {
            dataPadLength -= readPad(dataPadLength);
            readHeader( true );
            return available();
        }
        return Math.min( streamAvail, chunkAvail );
    }

    // Marks the logical end of all records and closes the wrapped stream;
    // the finally block guarantees the reference is dropped even on failure.
    protected void finalClose() throws IOException
    {
        try
        {   theEnd = true;
            if(null != is) is.close();
        }
        finally
        {
            is= null;
        }
    }
}

View File

@ -1,61 +0,0 @@
/*
* Copyright (C) 2011 Citrix Systems, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloud.bridge.util;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import javax.activation.DataSource;
import org.apache.log4j.Logger;
/**
* @author Kelven Yang
*/
/**
 * DataSource adapter exposing a byte range of a file as its input stream,
 * backed by a FileRangeInputStream opened eagerly in the constructor.
 * Only getInputStream() is meaningfully implemented; the other DataSource
 * methods are unused stubs that trip an assertion when assertions are enabled.
 */
public class FileRangeDataSource implements DataSource {
    protected final static Logger logger = Logger.getLogger(FileRangeDataSource.class);

    private FileRangeInputStream is;   // single stream over [startPos, endPos) of the file

    /**
     * Opens the requested file range immediately, so an invalid range or
     * unreadable file fails here rather than at first read.
     *
     * @throws IOException if the range is invalid or the file cannot be opened
     */
    public FileRangeDataSource(File file, long startPos, long endPos) throws IOException {
        is = new FileRangeInputStream(file, startPos, endPos);
    }

    @Override
    public String getContentType() {
        // Not expected to be called in this codebase — presumably callers only
        // use getInputStream(); confirm before wiring this into new senders.
        assert(false);
        return null;
    }

    @Override
    public InputStream getInputStream() throws IOException {
        return is;
    }

    @Override
    public String getName() {
        // Unused stub (see getContentType note).
        assert(false);
        return null;
    }

    @Override
    public OutputStream getOutputStream() throws IOException {
        // This data source is read-only; writing is intentionally unsupported.
        assert(false);
        return null;
    }
}

View File

@ -1,96 +0,0 @@
/*
* Copyright (C) 2011 Citrix Systems, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloud.bridge.util;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.RandomAccessFile;
/**
* @author Kelven Yang
*/
/**
 * InputStream over the byte range [startPos, endPos) of a file, backed by a
 * RandomAccessFile.  Out-of-bounds start/end positions are clamped to the
 * file length; a start beyond the end is rejected.  Not thread-safe.
 */
public class FileRangeInputStream extends InputStream {
    private RandomAccessFile randomAccessFile;  // underlying file, opened read-only
    private long curPos;       // offset of the next byte to read
    private long endPos;       // exclusive end of the readable range
    private long fileLength;   // total length of the file at construction time

    /**
     * @param file     the file to read from
     * @param startPos first readable offset (clamped to the file length)
     * @param endPos   exclusive end offset (clamped to the file length)
     * @throws IOException if the file cannot be opened or seeked
     * @throws IllegalArgumentException if startPos > endPos after clamping
     */
    public FileRangeInputStream(File file, long startPos, long endPos) throws IOException {
        fileLength = file.length();
        if (startPos > fileLength)
            startPos = fileLength;
        if (endPos > fileLength)
            endPos = fileLength;

        if (startPos > endPos)
            throw new IllegalArgumentException("Invalid file range " + startPos + "-" + endPos);

        this.curPos = startPos;
        this.endPos = endPos;
        randomAccessFile = new RandomAccessFile(file, "r");
        randomAccessFile.seek(startPos);
    }

    /** @return bytes remaining in the range (truncated to int) */
    @Override
    public int available() throws IOException {
        return (int)(endPos - curPos);
    }

    @Override
    public int read() throws IOException {
        if (available() > 0) {
            int value = randomAccessFile.read();
            curPos++;
            return value;
        }
        return -1;
    }

    @Override
    public int read(byte[] b) throws IOException {
        return read(b, 0, b.length);
    }

    @Override
    public int read(byte[] b, int off, int len) throws IOException {
        // Per the InputStream contract a zero-length read returns 0, never -1.
        if (len == 0)
            return 0;

        int bytesToRead = Math.min(len, available());
        if (bytesToRead <= 0)
            return -1;   // range exhausted

        int bytesRead = randomAccessFile.read(b, off, bytesToRead);
        if (bytesRead < 0)
            return -1;

        curPos += bytesRead;
        return bytesRead;
    }

    @Override
    public long skip(long n) throws IOException {
        // Per the InputStream contract a non-positive request skips nothing;
        // without this guard a negative n would corrupt curPos.
        if (n <= 0)
            return 0;

        long toSkip = Math.min(n, available());
        // Trust the count skipBytes actually achieved, not the count requested.
        int skipped = randomAccessFile.skipBytes((int) toSkip);
        curPos += skipped;
        return skipped;
    }

    @Override
    public void close() throws IOException {
        randomAccessFile.close();
    }
}

View File

@ -1,169 +0,0 @@
/*
* Copyright (C) 2011 Citrix Systems, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloud.bridge.util;
import org.apache.log4j.Logger;
import java.io.InputStream;
import java.io.IOException;
/**
 * A DIME stream is actually composed of multiple encoded streams.
 * This class is a wrapper around the DimeDelimitedInputStream inorder
 * to provide a simple iterator like interface for all the streams in a
 * DIME encoded message.
 */
public class MultiPartDimeInputStream
{
protected final static Logger logger = Logger.getLogger(MultiPartDimeInputStream.class);
// The raw request stream holding the entire DIME message.
protected InputStream is = null;
// Stream over the DIME part currently being read; null until nextInputStream() succeeds.
protected DimeDelimitedInputStream currentStream = null;
// Number of parts handed out so far (used to lazily open the first part).
protected int count = 0;
// True once the current part has been read to its end.
protected boolean eos = false;
// Metadata of the current part, refreshed by each call to nextInputStream().
protected String contentId = null;
protected String type = null;
protected String typeFormat = null;
/**
 * The SOAP stream must be first, call nextInputStream to get
 * access to the first stream and all streams after that.
 *
 * @param is the true input stream holding the incoming request.
 */
public MultiPartDimeInputStream( InputStream is ) throws IOException
{
this.is = is;
}
/**
 * These three methods are DIME specific but provide potentially
 * useful information about the current stream's data.
 *
 * @return URL or MIME type
 */
public String getStreamType()
{
return type;
}
// Is the type a URI or MIME type or just unknown?
public String getStreamTypeFormat()
{
// Is the type a URI or MIME type or just unknown?
return typeFormat;
}
// The soap body might have string identifiers to point to other streams in the message.
public String getStreamId()
{
// The soap body might have string identifiers to point to other streams in the message
return contentId;
}
/**
 * @return the stream over the current DIME part, or null before the first
 *         nextInputStream() call / after the last part.
 */
public InputStream getInputStream()
{
return currentStream;
}
/**
 * @return bytes remaining in the current part, or -1 once it is exhausted.
 *         NOTE(review): -1-on-EOS differs from the InputStream.available()
 *         convention of returning 0 -- callers here appear to rely on it.
 * @throws IOException if no current stream is open
 */
public int available() throws IOException
{
if (eos) return -1;
if (null == currentStream)
{
throw new IOException( "streamClosed -- call nextInputStream()" );
}
return currentStream.available();
}
/**
 * Move on to the next stream encoded in the DIME stream.
 * If the current stream has not been all read, then we skip the remaining bytes of
 * that stream.
 *
 * @return false if no next input stream, true if next input stream ready
 * @throws IOException
 */
public boolean nextInputStream() throws IOException
{
if ( null == currentStream )
{
// on the first call to this function get the first stream
if (0 == count) currentStream = new DimeDelimitedInputStream( is );
}
else
{ // make sure the bytes of the previous stream are all skipped before we start the next
currentStream.close();
contentId = null;
type = null;
typeFormat = null;
currentStream = currentStream.getNextStream();
}
if ( null != currentStream )
{
// cache the new part's metadata and reset the end-of-stream flag
contentId = currentStream.getContentId();
type = currentStream.getType();
typeFormat = currentStream.getDimeTypeNameFormat();
eos = false;
count++;
return true;
}
else return false;
}
/**
 * Skip up to n bytes of the current part.
 *
 * @throws IOException if no part is open or the current one is exhausted
 */
public long skip( long n ) throws IOException
{
if (eos || null == currentStream)
{
throw new IOException( "streamClosed -- call nextInputStream()" );
}
return currentStream.skip( n );
}
/**
 * Read into b[off..off+len); returns -1 (and latches eos) at end of the part.
 */
public int read( byte[] b, int off, int len ) throws IOException
{
if (eos || null == currentStream) return -1;
int read = currentStream.read( b, off, len );
if (read < 0) eos = true;
return read;
}
/**
 * Convenience overload: fill as much of b as possible from the current part.
 */
public int read( byte[] b ) throws IOException
{
return read( b, 0, b.length );
}
/**
 * Read one byte of the current part; returns -1 (and latches eos) at its end.
 */
public int read() throws IOException
{
if (eos || null == currentStream) return -1;
int ret = currentStream.read();
if (ret < 0) eos = true;
return ret;
}
}

View File

@ -1,359 +1,402 @@
/*
* Copyright (C) 2011 Citrix Systems, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloud.bridge.util;
import java.security.SignatureException;
import java.util.*;
import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;
import org.apache.commons.codec.binary.Base64;
import org.apache.log4j.Logger;
/**
 * This class expects that the caller pulls the required headers from the standard
 * HttpServletRequest structure and feeds them in through the setter methods below.
 * It is used for the Authentication check for REST requests and Query String
 * Authentication requests: it rebuilds the S3 StringToSign and verifies the
 * caller-supplied signature against an HMAC-SHA1 computed with the user's secret key.
 */
public class RestAuth {
    protected final static Logger logger = Logger.getLogger(RestAuth.class);

    // TreeMap: used when constructing the CanonicalizedAmzHeaders element of the
    // StringToSign ("sort the collection of headers lexicographically by header name")
    protected TreeMap<String, String> AmazonHeaders = null; // not always present
    protected String bucketName  = null; // not always present
    protected String queryString = null; // only interested in a small set of values
    protected String uriPath     = null; // only interested in the resource path
    protected String date        = null; // only if x-amz-date is not set
    protected String contentType = null; // not always present
    protected String contentMD5  = null; // not always present
    protected boolean amzDateSet   = false;
    protected boolean useSubDomain = false;

    public RestAuth() {
        // these must be lexicographically sorted
        AmazonHeaders = new TreeMap<String, String>();
    }

    public RestAuth(boolean useSubDomain) {
        // delegate to the default constructor so map initialization lives in one place
        this();
        this.useSubDomain = useSubDomain;
    }

    public void setUseSubDomain(boolean value) {
        useSubDomain = value;
    }

    public boolean getUseSubDomain() {
        return useSubDomain;
    }

    /**
     * This header is used iff the "x-amz-date:" header is not defined.
     * Value is used in constructing the StringToSign for signature verification.
     *
     * @param date - the contents of the "Date:" header, skipping the 'Date:' preamble,
     *        OR the value of the "Expires=" query string parameter passed in
     *        for "Query String Authentication".
     */
    public void setDateHeader( String date ) {
        if (this.amzDateSet) return;
        if (null != date) date = date.trim();
        this.date = date;
    }

    /**
     * Value is used in constructing the StringToSign for signature verification.
     *
     * @param type - the contents of the "Content-Type:" header, skipping the preamble.
     */
    public void setContentTypeHeader( String type ) {
        if (null != type) type = type.trim();
        this.contentType = type;
    }

    /**
     * Value is used in constructing the StringToSign for signature verification.
     *
     * @param md5 - the contents of the "Content-MD5:" header, skipping the preamble.
     */
    public void setContentMD5Header( String md5 ) {
        if (null != md5) md5 = md5.trim();
        this.contentMD5 = md5;
    }

    /**
     * The bucket name can be in the "Host:" header but it does not have to be. It can
     * instead be in the uriPath as the first step in the path.
     *
     * Used as part of the CanonicalizedResource element of the StringToSign.
     * If we get "Host: static.johnsmith.net:8080", then the bucket name is "static.johnsmith.net".
     *
     * @param header - contents of the "Host:" header, skipping the 'Host:' preamble.
     */
    public void setHostHeader( String header ) {
        if (null == header) {
            this.bucketName = null;
            return;
        }
        // strip an optional ":port" suffix from the host name
        header = header.trim();
        int offset = header.indexOf( ":" );
        if (-1 != offset) header = header.substring( 0, offset );
        this.bucketName = header;
    }

    /**
     * Used as part of the CanonicalizedResource element of the StringToSign.
     * NOTE(review): only a sub-resource appearing as the FIRST query parameter is
     * recognized here, and only the four listed below -- the S3 spec defines more
     * sub-resources (uploads, versionId, website, ...); confirm callers never
     * sign requests using the others.
     *
     * @param query - results from calling "HttpServletRequest req.getQueryString()"
     */
    public void setQueryString( String query ) {
        if (null == query) {
            this.queryString = null;
            return;
        }
        query = "?" + query.trim();

        // -> only interested in this subset of parameters
        if (query.startsWith( "?versioning" ) || query.startsWith( "?location" ) ||
            query.startsWith( "?acl" )        || query.startsWith( "?torrent" )) {
            // -> include any value (i.e., with '=') and chop off the rest
            int offset = query.indexOf( "&" );
            if (-1 != offset) query = query.substring( 0, offset );
            this.queryString = query;
        }
    }

    /**
     * Used as part of the CanonicalizedResource element of the StringToSign.
     * Append the path part of the un-decoded HTTP Request-URI, up-to but not
     * including the query string.
     *
     * @param path - results from calling "HttpServletRequest req.getPathInfo()"
     */
    public void addUriPath( String path ) {
        if (null != path) path = path.trim();
        this.uriPath = path;
    }

    /**
     * Pass in each complete Amazon header found in the HTTP request one at a time.
     * Each Amazon header added will become part of the signature calculation.
     * We are using a TreeMap here because of the S3 definition:
     * "Sort the collection of headers lexicographically by header name."
     *
     * @param headerAndValue - needs to be the complete amazon header (i.e., starts with "x-amz").
     */
    public void addAmazonHeader( String headerAndValue ) {
        if (null == headerAndValue) return;

        // [A] First canonicalize the header and its value
        // -> we use the header 'name' as the key since we have to sort on that
        int offset = headerAndValue.indexOf( ":" );
        if (-1 == offset) return;   // malformed header, no "name:value" separator

        String header = headerAndValue.substring( 0, offset+1 ).toLowerCase();
        String value  = headerAndValue.substring( offset+1 ).trim();

        // -> RFC 2616, Section 4.2: unfold the header's value by replacing linear
        //    white space with a single space character (the old single-space
        //    replaceAll was a no-op; use a run-collapsing pattern instead)
        value = value.replaceAll( "(\r\n|\t|\n)", " " ); // CRLF, tab, and LF to one space
        value = value.replaceAll( " +", " " );           // multiple spaces to one space

        String canonicalized;
        // [B] Does this header already exist?
        if ( AmazonHeaders.containsKey( header )) {
            // -> combine header fields with the same name into one
            //    "header-name:comma-separated-value-list" pair as prescribed by
            //    RFC 2616, section 4.2, without any white-space between values
            canonicalized = AmazonHeaders.get( header ) + "," + value + "\n";
            canonicalized = canonicalized.replaceAll( "\n,", "," ); // remove the '\n' from the first stored value
        }
        else canonicalized = header + value + "\n"; // as per spec, no space between header and its value

        AmazonHeaders.put( header, canonicalized );

        // [C] "x-amz-date:" takes precedence over the "Date:" header
        if (header.equals( "x-amz-date:" )) {
            this.amzDateSet = true;
            this.date = null;
        }
    }

    /**
     * The request is authenticated if we can regenerate the same signature given
     * on the request. Before calling this function make sure to set the header values
     * defined by the public setters above.
     *
     * @param httpVerb - the type of HTTP request (e.g., GET, PUT)
     * @param secretKey - value obtained from the AWSAccessKeyId
     * @param signature - the signature we are trying to recreate, note can be URL-encoded
     *
     * @return true if request has been authenticated, false otherwise
     * @throws SignatureException if the HMAC cannot be computed
     * @throws UnsupportedEncodingException if the signature cannot be URL-decoded
     */
    public boolean verifySignature( String httpVerb, String secretKey, String signature )
        throws SignatureException, UnsupportedEncodingException {
        if (null == httpVerb || null == secretKey || null == signature) return false;

        httpVerb  = httpVerb.trim();
        secretKey = secretKey.trim();
        signature = signature.trim();

        // -> first calculate the StringToSign after the caller has initialized all the header values
        String stringToSign = genStringToSign( httpVerb );
        if (null == stringToSign) return false;   // defensive: nothing to sign -> cannot authenticate

        String calSig = calculateRFC2104HMAC( stringToSign, secretKey );

        // -> was the passed in signature URL encoded? (it must be base64 encoded)
        if (-1 != signature.indexOf( "%" )) signature = URLDecoder.decode( signature, "UTF-8" );

        boolean match = signature.equals( calSig );
        if (!match)
            logger.error( "Signature mismatch, [" + signature + "] [" + calSig + "] over [" + stringToSign + "]" );
        return match;
    }

    /**
     * This function generates the single string that will be used to sign with a
     * user's secret key.
     *
     * StringToSign = HTTP-Verb + "\n" +
     *                Content-MD5 + "\n" +
     *                Content-Type + "\n" +
     *                Date + "\n" +
     *                CanonicalizedAmzHeaders +
     *                CanonicalizedResource;
     *
     * @return The single StringToSign or null.
     */
    private String genStringToSign( String httpVerb ) {
        StringBuffer canonicalized = new StringBuffer();
        String temp = null;

        canonicalized.append( httpVerb ).append( "\n" );

        if (null != this.contentMD5)
            canonicalized.append( this.contentMD5 );
        canonicalized.append( "\n" );

        if (null != this.contentType)
            canonicalized.append( this.contentType );
        canonicalized.append( "\n" );

        if (null != this.date)
            canonicalized.append( this.date );
        canonicalized.append( "\n" );

        if (null != (temp = genCanonicalizedAmzHeadersElement())) canonicalized.append( temp );
        if (null != (temp = genCanonicalizedResourceElement() )) canonicalized.append( temp );

        if (0 == canonicalized.length())
            return null;
        return canonicalized.toString();
    }

    /**
     * CanonicalizedResource represents the Amazon S3 resource targeted by the request.
     * CanonicalizedResource = [ "/" + Bucket ] +
     *    <HTTP-Request-URI, from the protocol name up to the query string> +
     *    [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
     *
     * @return A single string representing CanonicalizedResource or null.
     */
    private String genCanonicalizedResourceElement() {
        StringBuffer canonicalized = new StringBuffer();
        if (this.useSubDomain && this.bucketName != null)
            canonicalized.append( "/" ).append( this.bucketName );
        if (null != this.uriPath    ) canonicalized.append( this.uriPath );
        if (null != this.queryString) canonicalized.append( this.queryString );

        if (0 == canonicalized.length())
            return null;
        return canonicalized.toString();
    }

    /**
     * Construct the Canonicalized Amazon headers element of the StringToSign by
     * concatenating all headers in the TreeMap into a single string.
     *
     * @return A single string with all the Amazon headers glued together, or null
     *         if no Amazon headers appeared in the request.
     */
    private String genCanonicalizedAmzHeadersElement() {
        StringBuffer canonicalized = new StringBuffer();
        // TreeMap iteration order is already lexicographic by header name
        for (String headerLine : AmazonHeaders.values())
            canonicalized.append( headerLine );

        if (0 == canonicalized.length())
            return null;
        return canonicalized.toString();
    }

    /**
     * Create a signature: new String( Base64( HMAC-SHA1( key, data )))
     *
     * @param signIt - the data to generate a keyed HMAC over
     * @param secretKey - the user's unique key for the HMAC operation
     * @return the base64-encoded HMAC-SHA1 signature, trimmed
     * @throws SignatureException if the HMAC cannot be computed
     */
    private String calculateRFC2104HMAC( String signIt, String secretKey )
        throws SignatureException {
        try {
            // use an explicit charset; the platform default is not portable
            SecretKeySpec key = new SecretKeySpec( secretKey.getBytes( "UTF-8" ), "HmacSHA1" );
            Mac hmacSha1 = Mac.getInstance( "HmacSHA1" );
            hmacSha1.init( key );
            byte [] rawHmac = hmacSha1.doFinal( signIt.getBytes( "UTF-8" ));
            return new String( Base64.encodeBase64( rawHmac )).trim();
        } catch( Exception e ) {
            // preserve the underlying cause; never include the secret key in the message
            throw new SignatureException( "Failed to generate keyed HMAC on REST request: " + e.getMessage(), e );
        }
    }
}
/*
* Copyright (C) 2011 Citrix Systems, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloud.bridge.util;
import java.security.InvalidKeyException;
import java.security.SignatureException;
import java.util.*;
import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;
import org.apache.commons.codec.binary.Base64;
import org.apache.log4j.Logger;
/**
 * This class expects that the caller pulls the required headers from the standard
 * HttpServletRequest structure and feeds them in through the setter methods below.
 * This class is responsible for providing the RFC2104 calculation to ensure that
 * the signature is valid for the signing string; the signing string is a
 * representation of the request. Notes are given below on what values are expected.
 * It is used for the Authentication check for REST requests and Query String
 * Authentication requests.
 *
 * @author Kelven Yang, John Zucker, Salvatore Orlando
 */
public class RestAuth {
    protected final static Logger logger = Logger.getLogger(RestAuth.class);

    // TreeMap: used when constructing the CanonicalizedAmzHeaders element of the
    // StringToSign ("sort the collection of headers lexicographically by header name")
    protected TreeMap<String, String> AmazonHeaders = null; // not always present
    protected String bucketName  = null; // not always present
    protected String queryString = null; // for CanonicalizedResource - only recognized sub-resources
    protected String uriPath     = null; // only interested in the resource path
    protected String date        = null; // only if x-amz-date is not set
    protected String contentType = null; // not always present
    protected String contentMD5  = null; // not always present
    protected boolean amzDateSet   = false;
    protected boolean useSubDomain = false;

    // The sub-resources that must be folded into CanonicalizedResource, per the
    // S3 REST authentication specification.
    protected Set<String> allowedQueryParams;

    public RestAuth() {
        // these must be lexicographically sorted
        AmazonHeaders = new TreeMap<String, String>();
        // plain HashSet instead of the double-brace idiom (which creates an
        // anonymous subclass per instance and pins an enclosing reference)
        allowedQueryParams = new HashSet<String>( Arrays.asList(
            "acl", "lifecycle", "location", "logging", "notification",
            "partNumber", "policy", "requestPayment", "torrent", "uploadId",
            "uploads", "versionId", "versioning", "versions", "website" ));
    }

    public RestAuth(boolean useSubDomain) {
        // invoke the other constructor
        this();
        this.useSubDomain = useSubDomain;
    }

    public void setUseSubDomain(boolean value) {
        useSubDomain = value;
    }

    public boolean getUseSubDomain() {
        return useSubDomain;
    }

    /**
     * This header is used iff the "x-amz-date:" header is not defined.
     * Value is used in constructing the StringToSign for signature verification.
     *
     * @param date - the contents of the "Date:" header, skipping the 'Date:' preamble,
     *        OR the value of the "Expires=" query string parameter passed in
     *        for "Query String Authentication".
     */
    public void setDateHeader( String date ) {
        if (this.amzDateSet) return;
        if (null != date) date = date.trim();
        this.date = date;
    }

    /**
     * Value is used in constructing the StringToSign for signature verification.
     *
     * @param type - the contents of the "Content-Type:" header, skipping the preamble.
     */
    public void setContentTypeHeader( String type ) {
        if (null != type) type = type.trim();
        this.contentType = type;
    }

    /**
     * Value is used in constructing the StringToSign for signature verification.
     *
     * @param md5 - the contents of the "Content-MD5:" header, skipping the preamble.
     */
    public void setContentMD5Header( String md5 ) {
        if (null != md5) md5 = md5.trim();
        this.contentMD5 = md5;
    }

    /**
     * The bucket name can be in the "Host:" header but it does not have to be. It can
     * instead be in the uriPath as the first step in the path.
     *
     * Used as part of the CanonicalizedResource element of the StringToSign.
     * If we get "Host: static.johnsmith.net:8080", then the bucket name is "static.johnsmith.net".
     *
     * @param header - contents of the "Host:" header, skipping the 'Host:' preamble.
     */
    public void setHostHeader( String header ) {
        if (null == header) {
            this.bucketName = null;
            return;
        }
        // strip an optional ":port" suffix from the host name
        header = header.trim();
        int offset = header.indexOf( ":" );
        if (-1 != offset) header = header.substring( 0, offset );
        this.bucketName = header;
    }

    /**
     * Used as part of the CanonicalizedResource element of the StringToSign.
     * CanonicalizedResource = [ "/" + Bucket ] +
     *    <HTTP-Request-URI, from the protocol name up to the query string> + [sub-resource]
     * The sub-resources that must be included when constructing the CanonicalizedResource
     * element are: acl, lifecycle, location, logging, notification, partNumber, policy,
     * requestPayment, torrent, uploadId, uploads, versionId, versioning, versions and website.
     * (http://docs.amazonwebservices.com/AmazonS3/latest/dev/RESTAuthentication.html)
     *
     * @param query - results from calling "HttpServletRequest req.getQueryString()"
     */
    public void setQueryString( String query ) {
        if (null == query) {
            this.queryString = null;
            return;
        }

        // Sub-resources (i.e.: query params) must be lexicographically sorted
        Set<String> subResources = new TreeSet<String>();
        for (String queryParam : query.split( "&" )) {
            // keep only recognized sub-resources, matched by parameter name
            String paramName = queryParam.split( "=" )[0];
            if (allowedQueryParams.contains( paramName ))
                subResources.add( queryParam );
        }

        StringBuffer builtQuery = new StringBuffer();
        for (String subResource : subResources)
            builtQuery.append( subResource ).append( "&" );

        // If anything was kept, add a "?" at the beginning and remove the last '&'
        if (builtQuery.length() > 0) {
            builtQuery.insert( 0, "?" );
            builtQuery.deleteCharAt( builtQuery.length() - 1 );
        }
        this.queryString = builtQuery.toString();
    }

    /**
     * Used as part of the CanonicalizedResource element of the StringToSign.
     * Append the path part of the un-decoded HTTP Request-URI, up-to but not
     * including the query string.
     *
     * @param path - results from calling "HttpServletRequest req.getPathInfo()"
     */
    public void addUriPath( String path ) {
        if (null != path) path = path.trim();
        this.uriPath = path;
    }

    /**
     * Pass in each complete Amazon header found in the HTTP request one at a time.
     * Each Amazon header added will become part of the signature calculation.
     * We are using a TreeMap here because of the S3 definition:
     * "Sort the collection of headers lexicographically by header name."
     *
     * @param headerAndValue - needs to be the complete amazon header (i.e., starts with "x-amz").
     */
    public void addAmazonHeader( String headerAndValue ) {
        if (null == headerAndValue) return;

        // [A] First canonicalize the header and its value
        // -> we use the header 'name' as the key since we have to sort on that
        int offset = headerAndValue.indexOf( ":" );
        if (-1 == offset) return;   // malformed header, no "name:value" separator

        String header = headerAndValue.substring( 0, offset+1 ).toLowerCase();
        String value  = headerAndValue.substring( offset+1 ).trim();

        // -> RFC 2616, Section 4.2: unfold the header's value by replacing linear
        //    white space with a single space character (the old single-space
        //    replaceAll was a no-op; use a run-collapsing pattern instead)
        value = value.replaceAll( "(\r\n|\t|\n)", " " ); // CRLF, tab, and LF to one space
        value = value.replaceAll( " +", " " );           // multiple spaces to one space

        String canonicalized;
        // [B] Does this header already exist?
        if ( AmazonHeaders.containsKey( header )) {
            // -> combine header fields with the same name into one
            //    "header-name:comma-separated-value-list" pair as prescribed by
            //    RFC 2616, section 4.2, without any white-space between values
            canonicalized = AmazonHeaders.get( header ) + "," + value + "\n";
            canonicalized = canonicalized.replaceAll( "\n,", "," ); // remove the '\n' from the first stored value
        }
        else canonicalized = header + value + "\n"; // as per spec, no space between header and its value

        AmazonHeaders.put( header, canonicalized );

        // [C] "x-amz-date:" takes precedence over the "Date:" header
        if (header.equals( "x-amz-date:" )) {
            this.amzDateSet = true;
            this.date = null;
        }
    }

    /**
     * The request is authenticated if we can regenerate the same signature given
     * on the request. Before calling this function make sure to set the header values
     * defined by the public setters above.
     *
     * @param httpVerb - the type of HTTP request (e.g., GET, PUT)
     * @param secretKey - value obtained from the AWSAccessKeyId
     * @param signature - the signature we are trying to recreate, note can be URL-encoded
     *
     * @return true if request has been authenticated, false otherwise
     * @throws SignatureException if the HMAC cannot be computed
     * @throws UnsupportedEncodingException if the signature cannot be URL-decoded
     */
    public boolean verifySignature( String httpVerb, String secretKey, String signature )
        throws SignatureException, UnsupportedEncodingException {
        if (null == httpVerb || null == secretKey || null == signature) return false;

        httpVerb  = httpVerb.trim();
        secretKey = secretKey.trim();
        signature = signature.trim();

        // First calculate the StringToSign after the caller has initialized all the header values
        String stringToSign = genStringToSign( httpVerb );
        if (null == stringToSign) return false;   // defensive: nothing to sign -> cannot authenticate

        String calSig = calculateRFC2104HMAC( stringToSign, secretKey );

        // Was the passed in signature URL encoded? (it must be base64 encoded)
        if (-1 != signature.indexOf( "%" )) signature = URLDecoder.decode( signature, "UTF-8" );

        boolean match = signature.equals( calSig );
        if (!match)
            logger.error( "Signature mismatch, [" + signature + "] [" + calSig + "] over [" + stringToSign + "]" );
        return match;
    }

    /**
     * This function generates the single string that will be used to sign with a
     * user's secret key.
     *
     * StringToSign = HTTP-Verb + "\n" +
     *                Content-MD5 + "\n" +
     *                Content-Type + "\n" +
     *                Date + "\n" +
     *                CanonicalizedAmzHeaders +
     *                CanonicalizedResource;
     *
     * @return The single StringToSign or null.
     */
    private String genStringToSign( String httpVerb ) {
        StringBuffer canonicalized = new StringBuffer();
        String temp = null;
        String canonicalizedResourceElement = genCanonicalizedResourceElement();

        canonicalized.append( httpVerb ).append( "\n" );

        if (null != this.contentMD5)
            canonicalized.append( this.contentMD5 );
        canonicalized.append( "\n" );

        if (null != this.contentType)
            canonicalized.append( this.contentType );
        canonicalized.append( "\n" );

        if (null != this.date)
            canonicalized.append( this.date );
        canonicalized.append( "\n" );

        if (null != (temp = genCanonicalizedAmzHeadersElement())) canonicalized.append( temp );
        if (null != canonicalizedResourceElement) canonicalized.append( canonicalizedResourceElement );

        if (0 == canonicalized.length())
            return null;
        return canonicalized.toString();
    }

    /**
     * CanonicalizedResource represents the Amazon S3 resource targeted by the request.
     * CanonicalizedResource = [ "/" + Bucket ] +
     *    <HTTP-Request-URI, from the protocol name up to the query string> +
     *    [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
     *
     * @return A single string representing CanonicalizedResource or null.
     */
    private String genCanonicalizedResourceElement() {
        StringBuffer canonicalized = new StringBuffer();
        if (this.useSubDomain && this.bucketName != null)
            canonicalized.append( "/" ).append( this.bucketName );
        if (null != this.uriPath    ) canonicalized.append( this.uriPath );
        if (null != this.queryString) canonicalized.append( this.queryString );

        if (0 == canonicalized.length())
            return null;
        return canonicalized.toString();
    }

    /**
     * Construct the Canonicalized Amazon headers element of the StringToSign by
     * concatenating all headers in the TreeMap into a single string.
     *
     * @return A single string with all the Amazon headers glued together, or null
     *         if no Amazon headers appeared in the request.
     */
    private String genCanonicalizedAmzHeadersElement() {
        StringBuffer canonicalized = new StringBuffer();
        // TreeMap iteration order is already lexicographic by header name
        for (String headerLine : AmazonHeaders.values())
            canonicalized.append( headerLine );

        if (0 == canonicalized.length())
            return null;
        return canonicalized.toString();
    }

    /**
     * Create a signature: new String( Base64( HMAC-SHA1( key, data )))
     *
     * @param signIt - the data to generate a keyed HMAC over
     * @param secretKey - the user's unique key for the HMAC operation
     * @return the base64-encoded HMAC-SHA1 signature, trimmed
     * @throws SignatureException if the HMAC cannot be computed
     */
    private String calculateRFC2104HMAC( String signIt, String secretKey )
        throws SignatureException {
        try {
            // use an explicit charset; the platform default is not portable
            SecretKeySpec key = new SecretKeySpec( secretKey.getBytes( "UTF-8" ), "HmacSHA1" );
            Mac hmacSha1 = Mac.getInstance( "HmacSHA1" );
            hmacSha1.init( key );
            byte [] rawHmac = hmacSha1.doFinal( signIt.getBytes( "UTF-8" ));
            return new String( Base64.encodeBase64( rawHmac )).trim();
        }
        catch( InvalidKeyException e ) {
            // SECURITY: do NOT echo the secret key into the exception message --
            // these messages may end up in logs; also preserve the cause
            throw new SignatureException( "Failed to generate keyed HMAC on REST request: invalid secret key", e );
        }
        catch( Exception e ) {
            // preserve the underlying cause for diagnosis
            throw new SignatureException( "Failed to generate keyed HMAC on REST request: " + e.getMessage(), e );
        }
    }
}

View File

@ -1,51 +0,0 @@
/*
* Copyright (C) 2011 Citrix Systems, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloud.bridge.util;
/**
 * Simple generic pair holding two independently typed values, with
 * fluent setters that return the tuple itself for call chaining.
 *
 * @author Kelven Yang
 *
 * @param <T1> type of the first element
 * @param <T2> type of the second element
 */
public class Tuple <T1, T2> {
    T1 first;
    T2 second;

    /**
     * Build a pair from the two supplied values.
     */
    public Tuple(T1 t1, T2 t2) {
        this.first = t1;
        this.second = t2;
    }

    /** @return the first element of the pair */
    public T1 getFirst() {
        return this.first;
    }

    /** @return the second element of the pair */
    public T2 getSecond() {
        return this.second;
    }

    /**
     * Replace the first element.
     *
     * @return this tuple, for chaining
     */
    public Tuple<T1, T2> setFirst(T1 t1) {
        this.first = t1;
        return this;
    }

    /**
     * Replace the second element.
     *
     * @return this tuple, for chaining
     */
    public Tuple<T1, T2> setSecond(T2 t2) {
        this.second = t2;
        return this;
    }
}