From 4b0d7ccb310d0fa7d1e71c825d2e7cebfa27f333 Mon Sep 17 00:00:00 2001 From: JohnZ Date: Thu, 3 May 2012 15:06:30 +0100 Subject: [PATCH] Core S3 classes up to date --- .gitignore | 1 + .../bridge/auth/s3/AuthenticationHandler.java | 423 +- .../lifecycle/ServiceEngineLifecycle.java | 2 +- awsapi/src/com/cloud/bridge/model/SHost.java | 4 - .../com/cloud/bridge/persist/EntityDao.java | 15 + .../cloud/bridge/service/EC2RestServlet.java | 1 + .../cloud/bridge/service/EC2SoapService.java | 1 + .../com/cloud/bridge/service/PWCBHandler.java | 42 - .../cloud/bridge/service/S3BucketAdapter.java | 38 - .../service/S3FileSystemBucketAdapter.java | 245 -- .../cloud/bridge/service/S3SoapService.java | 115 - .../bridge/service/S3SoapServiceImpl.java | 700 --- .../cloud/bridge/service/ServiceProvider.java | 358 -- .../cloud/bridge/service/ServletAction.java | 29 - .../service/controller/s3/S3BucketAction.java | 1 + .../controller/s3/ServiceProvider.java | 716 ++-- .../bridge/service/core/s3/S3Engine.java | 3746 ++++++++--------- .../cloud/bridge/service/core/s3/S3Grant.java | 168 +- .../core/s3/S3ListAllMyBucketsEntry.java | 116 +- .../core/s3/S3PutObjectInlineRequest.java | 222 +- .../core/s3/S3PutObjectInlineResponse.java | 134 +- .../bridge/util/DimeDelimitedInputStream.java | 617 --- .../bridge/util/FileRangeDataSource.java | 61 - .../bridge/util/FileRangeInputStream.java | 96 - .../bridge/util/MultiPartDimeInputStream.java | 169 - .../src/com/cloud/bridge/util/RestAuth.java | 761 ++-- awsapi/src/com/cloud/bridge/util/Tuple.java | 51 - 27 files changed, 3185 insertions(+), 5647 deletions(-) delete mode 100644 awsapi/src/com/cloud/bridge/service/PWCBHandler.java delete mode 100644 awsapi/src/com/cloud/bridge/service/S3BucketAdapter.java delete mode 100644 awsapi/src/com/cloud/bridge/service/S3FileSystemBucketAdapter.java delete mode 100644 awsapi/src/com/cloud/bridge/service/S3SoapService.java delete mode 100644 awsapi/src/com/cloud/bridge/service/S3SoapServiceImpl.java delete 
mode 100644 awsapi/src/com/cloud/bridge/service/ServiceProvider.java delete mode 100644 awsapi/src/com/cloud/bridge/service/ServletAction.java delete mode 100644 awsapi/src/com/cloud/bridge/util/DimeDelimitedInputStream.java delete mode 100644 awsapi/src/com/cloud/bridge/util/FileRangeDataSource.java delete mode 100644 awsapi/src/com/cloud/bridge/util/FileRangeInputStream.java delete mode 100644 awsapi/src/com/cloud/bridge/util/MultiPartDimeInputStream.java delete mode 100644 awsapi/src/com/cloud/bridge/util/Tuple.java diff --git a/.gitignore b/.gitignore index 23cef739807..1bde90a7aca 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ build/replace.properties build/build.number bin/ +*.000 *.pem *.patch *.xml000 diff --git a/awsapi/src/com/cloud/bridge/auth/s3/AuthenticationHandler.java b/awsapi/src/com/cloud/bridge/auth/s3/AuthenticationHandler.java index fafbdcd3470..e7a29dd29ef 100644 --- a/awsapi/src/com/cloud/bridge/auth/s3/AuthenticationHandler.java +++ b/awsapi/src/com/cloud/bridge/auth/s3/AuthenticationHandler.java @@ -1,212 +1,213 @@ -/* - * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.cloud.bridge.auth.s3; - -import java.sql.SQLException; - -import javax.servlet.http.HttpServletRequest; - -import org.apache.axiom.soap.SOAPEnvelope; -import org.apache.axiom.soap.SOAPBody; -import org.apache.log4j.Logger; -import org.apache.axis2.context.MessageContext; -import org.apache.axis2.engine.Handler; -import org.apache.axis2.AxisFault; -import org.apache.axis2.description.HandlerDescription; -import org.apache.axis2.description.Parameter; - -import com.cloud.bridge.model.UserCredentials; -import com.cloud.bridge.persist.dao.UserCredentialsDao; -import com.cloud.bridge.service.UserContext; -import com.cloud.bridge.util.S3SoapAuth; - -/* - * For SOAP compatibility. - */ - -public class AuthenticationHandler implements Handler { - protected final static Logger logger = Logger.getLogger(AuthenticationHandler.class); - - protected HandlerDescription handlerDesc = new HandlerDescription( "default handler" ); - private String name = "S3AuthenticationHandler"; - - public void init( HandlerDescription handlerdesc ) - { - this.handlerDesc = handlerdesc; - } - - public String getName() - { - //logger.debug( "getName entry S3AuthenticationHandler" + name ); - return name; - } - - public String toString() - { - return (name != null) ? name.toString() : null; - } - - public HandlerDescription getHandlerDesc() - { - return handlerDesc; - } - - public Parameter getParameter( String name ) - { - return handlerDesc.getParameter( name ); - } - - - /** - * Verify the request's authentication signature by extracting all the - * necessary parts of the request, obtaining the requestor's secret key, and - * recalculating the signature. - * - * On Signature mismatch raise an AxisFault (i.e., a SoapFault) with what Amazon S3 - * defines as a "Client.SignatureMismatch" error. - * - * Special case: need to deal with anonymous requests where no AWSAccessKeyId is - * given. In this case just pass the request on. 
- */ - public InvocationResponse invoke(MessageContext msgContext) throws AxisFault - { - String accessKey = null; - String operation = null; - String msgSig = null; - String timestamp = null; - String secretKey = null; - String temp = null; - - // [A] Obtain the HttpServletRequest object - HttpServletRequest httpObj =(HttpServletRequest)msgContext.getProperty("transport.http.servletRequest"); - if (null != httpObj) System.out.println("S3 SOAP auth test header access - acceptable Encoding type: "+ httpObj.getHeader("Accept-Encoding")); - - // [A] Try to recalculate the signature for non-anonymous requests - try - { SOAPEnvelope soapEnvelope = msgContext.getEnvelope(); - SOAPBody soapBody = soapEnvelope.getBody(); - String xmlBody = soapBody.toString(); - //logger.debug( "xmlrequest: " + xmlBody ); - - // -> did we get here yet its an EC2 request? - int offset = xmlBody.indexOf( "http://ec2.amazonaws.com" ); - if (-1 != offset) return InvocationResponse.CONTINUE; - - - // -> if it is anonymous request, then no access key should exist - int start = xmlBody.indexOf( "AWSAccessKeyId>" ); - if (-1 == start) { - UserContext.current().initContext(); - return InvocationResponse.CONTINUE; - } - temp = xmlBody.substring( start+15 ); - int end = temp.indexOf( " what if we cannot find the user's key? 
- if (null != (secretKey = lookupSecretKey( accessKey ))) - { - // -> if any other field is missing, then the signature will not match - if ( null != (operation = soapBody.getFirstElementLocalName())) - operation = operation.trim(); - else operation = ""; - //logger.debug( "operation " + operation ); - - start = xmlBody.indexOf( "Timestamp>" ); - if ( -1 < start ) - { - temp = xmlBody.substring( start+10 ); - end = temp.indexOf( "" ); - if ( -1 < start ) - { - temp = xmlBody.substring( start+10 ); - end = temp.indexOf( " for SOAP requests the Cloud API keys are sent here and only here - S3SoapAuth.verifySignature( msgSig, operation, timestamp, accessKey, secretKey ); - UserContext.current().initContext( accessKey, secretKey, accessKey, "S3 SOAP request", httpObj ); - return InvocationResponse.CONTINUE; - } - - - public void revoke(MessageContext msgContext) - { - logger.info(msgContext.getEnvelope().toString()); - } - - public void setName(String name) - { - //logger.debug( "setName entry S3AuthenticationHandler " + name ); - this.name = name; - } - - /** - * Given the user's access key, then obtain his secret key in the user database. 
- * - * @param accessKey - a unique string allocated for each registered user - * @return the secret key or null of no matching user found - */ - private String lookupSecretKey( String accessKey ) - throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException - { - UserCredentialsDao credentialDao = new UserCredentialsDao(); - UserCredentials cloudKeys = credentialDao.getByAccessKey( accessKey ); - if ( null == cloudKeys ) { - logger.debug( accessKey + " is not defined in the S3 service - call SetUserKeys" ); - return null; - } - else return cloudKeys.getSecretKey(); - } - - @Override - public void cleanup() - { - //logger.debug( "cleanup entry S3AuthenticationHandler " ); - } - - @Override - public void flowComplete( MessageContext arg0 ) - { - //logger.debug( "flowComplete entry S3AuthenticationHandler " ); - } +/* + * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.cloud.bridge.auth.s3; + +import java.sql.SQLException; + +import javax.servlet.http.HttpServletRequest; + +import org.apache.axiom.soap.SOAPEnvelope; +import org.apache.axiom.soap.SOAPBody; +import org.apache.log4j.Logger; +import org.apache.axis2.context.MessageContext; +import org.apache.axis2.engine.Handler; +import org.apache.axis2.AxisFault; +import org.apache.axis2.description.HandlerDescription; +import org.apache.axis2.description.Parameter; + +import com.cloud.bridge.model.UserCredentials; +import com.cloud.bridge.persist.dao.UserCredentialsDao; +import com.cloud.bridge.service.UserContext; +import com.cloud.bridge.util.S3SoapAuth; + +/* + * For SOAP compatibility. + */ + +public class AuthenticationHandler implements Handler { + protected final static Logger logger = Logger.getLogger(AuthenticationHandler.class); + + protected HandlerDescription handlerDesc = new HandlerDescription( "default handler" ); + private String name = "S3AuthenticationHandler"; + + public void init( HandlerDescription handlerdesc ) + { + this.handlerDesc = handlerdesc; + } + + public String getName() + { + //logger.debug( "getName entry S3AuthenticationHandler" + name ); + return name; + } + + public String toString() + { + return (name != null) ? name.toString() : null; + } + + public HandlerDescription getHandlerDesc() + { + return handlerDesc; + } + + public Parameter getParameter( String name ) + { + return handlerDesc.getParameter( name ); + } + + + /** + * Verify the request's authentication signature by extracting all the + * necessary parts of the request, obtaining the requestor's secret key, and + * recalculating the signature. + * + * On Signature mismatch raise an AxisFault (i.e., a SoapFault) with what Amazon S3 + * defines as a "Client.SignatureMismatch" error. + * + * Special case: need to deal with anonymous requests where no AWSAccessKeyId is + * given. In this case just pass the request on. 
+ */ + public InvocationResponse invoke(MessageContext msgContext) throws AxisFault + { + String accessKey = null; + String operation = null; + String msgSig = null; + String timestamp = null; + String secretKey = null; + String temp = null; + + // [A] Obtain the HttpServletRequest object + HttpServletRequest httpObj =(HttpServletRequest)msgContext.getProperty("transport.http.servletRequest"); + if (null != httpObj) System.out.println("S3 SOAP auth test header access - acceptable Encoding type: "+ httpObj.getHeader("Accept-Encoding")); + + // [A] Try to recalculate the signature for non-anonymous requests + try + { SOAPEnvelope soapEnvelope = msgContext.getEnvelope(); + SOAPBody soapBody = soapEnvelope.getBody(); + String xmlBody = soapBody.toString(); + //logger.debug( "xmlrequest: " + xmlBody ); + + // -> did we get here yet its an EC2 request? + int offset = xmlBody.indexOf( "http://ec2.amazonaws.com" ); + if (-1 != offset) return InvocationResponse.CONTINUE; + + + // -> if it is anonymous request, then no access key should exist + int start = xmlBody.indexOf( "AWSAccessKeyId>" ); + if (-1 == start) { + UserContext.current().initContext(); + return InvocationResponse.CONTINUE; + } + temp = xmlBody.substring( start+15 ); + int end = temp.indexOf( " what if we cannot find the user's key? 
+ if (null != (secretKey = lookupSecretKey( accessKey ))) + { + // -> if any other field is missing, then the signature will not match + if ( null != (operation = soapBody.getFirstElementLocalName())) + operation = operation.trim(); + else operation = ""; + //logger.debug( "operation " + operation ); + + start = xmlBody.indexOf( "Timestamp>" ); + if ( -1 < start ) + { + temp = xmlBody.substring( start+10 ); + end = temp.indexOf( "" ); + if ( -1 < start ) + { + temp = xmlBody.substring( start+10 ); + end = temp.indexOf( " for SOAP requests the Cloud API keys are sent here and only here + S3SoapAuth.verifySignature( msgSig, operation, timestamp, accessKey, secretKey ); + UserContext.current().initContext( accessKey, secretKey, accessKey, "S3 SOAP request", httpObj ); + return InvocationResponse.CONTINUE; + } + + + public void revoke(MessageContext msgContext) + { + logger.info(msgContext.getEnvelope().toString()); + } + + public void setName(String name) + { + //logger.debug( "setName entry S3AuthenticationHandler " + name ); + this.name = name; + } + + /** + * Given the user's access key, then obtain his secret key in the user database. 
+ * + * @param accessKey - a unique string allocated for each registered user + * @return the secret key or null of no matching user found + */ + private String lookupSecretKey( String accessKey ) + throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException + { + UserCredentialsDao credentialDao = new UserCredentialsDao(); + UserCredentials cloudKeys = credentialDao.getByAccessKey( accessKey ); + if ( null == cloudKeys ) { + logger.debug( accessKey + " is not defined in the S3 service - call SetUserKeys" ); + return null; + } + else return cloudKeys.getSecretKey(); + } + + @Override + public void cleanup() + { + //logger.debug( "cleanup entry S3AuthenticationHandler " ); + } + + @Override + public void flowComplete( MessageContext arg0 ) + { + //logger.debug( "flowComplete entry S3AuthenticationHandler " ); + } } + diff --git a/awsapi/src/com/cloud/bridge/lifecycle/ServiceEngineLifecycle.java b/awsapi/src/com/cloud/bridge/lifecycle/ServiceEngineLifecycle.java index e34eb15cfa9..83060b6a1cf 100644 --- a/awsapi/src/com/cloud/bridge/lifecycle/ServiceEngineLifecycle.java +++ b/awsapi/src/com/cloud/bridge/lifecycle/ServiceEngineLifecycle.java @@ -18,8 +18,8 @@ package com.cloud.bridge.lifecycle; import org.apache.axis2.context.ConfigurationContext; import org.apache.axis2.description.AxisService; import org.apache.axis2.engine.ServiceLifeCycle; +import com.cloud.bridge.service.controller.s3.ServiceProvider; -import com.cloud.bridge.service.ServiceProvider; /** * @author Kelven Yang diff --git a/awsapi/src/com/cloud/bridge/model/SHost.java b/awsapi/src/com/cloud/bridge/model/SHost.java index d055c2d76bb..2d269684f9f 100644 --- a/awsapi/src/com/cloud/bridge/model/SHost.java +++ b/awsapi/src/com/cloud/bridge/model/SHost.java @@ -20,11 +20,7 @@ import java.util.HashSet; import java.util.Set; /** -<<<<<<< HEAD - * @author Kelven Yang -======= * @author Kelven Yang, John Zucker ->>>>>>> 6472e7b... Now really adding the renamed files! 
*/ public class SHost implements Serializable { private static final long serialVersionUID = 213346565810468018L; diff --git a/awsapi/src/com/cloud/bridge/persist/EntityDao.java b/awsapi/src/com/cloud/bridge/persist/EntityDao.java index b8a097f2c12..be846a3167b 100644 --- a/awsapi/src/com/cloud/bridge/persist/EntityDao.java +++ b/awsapi/src/com/cloud/bridge/persist/EntityDao.java @@ -29,19 +29,34 @@ import com.cloud.bridge.util.QueryHelper; * response to queryEntities for a particular instantation of the EntityDao generic class, as defined here. * Any instantation of EntityDao passes in the class for which it is instantiating. For example a new instance of SBucketDao * passes in com.cloud.bridge.model.SBucket as its clazz. + * Instantiators, providing an Entity definition, are the classes + * MHostDao, + * MHostMountDao, + * SAclDao, + * SBucketDao, + * SHostDao, + * SMetaDao, + * SObjectDao, + * SObjectItemDao, + * CloudStackSvcOfferingDao */ public class EntityDao { private Class clazz; private boolean isCloudStackSession = false; + + // Constructor to implement CloudStackSvcOffering: see class CloudStackSvcOfferingDao public EntityDao(Class clazz, boolean isCloudStackSession) { this.clazz = clazz; this.isCloudStackSession = isCloudStackSession; // Note : beginTransaction can be called multiple times + // "If a new underlying transaction is required, begin the transaction. Otherwise continue the new work in the + // context of the existing underlying transaction." 
from the Hibernate spec PersistContext.beginTransaction(isCloudStackSession); } + // Standard constructor to implement MHostDao, MHostMountDao, SAclDao, SBucketDao, SHostDao, SMetaDao, SObjectDao, SObjectItemDao public EntityDao(Class clazz) { this.clazz = clazz; diff --git a/awsapi/src/com/cloud/bridge/service/EC2RestServlet.java b/awsapi/src/com/cloud/bridge/service/EC2RestServlet.java index 2033fd55266..075a92f346b 100644 --- a/awsapi/src/com/cloud/bridge/service/EC2RestServlet.java +++ b/awsapi/src/com/cloud/bridge/service/EC2RestServlet.java @@ -96,6 +96,7 @@ import com.cloud.bridge.model.UserCredentials; import com.cloud.bridge.persist.PersistContext; import com.cloud.bridge.persist.dao.OfferingDao; import com.cloud.bridge.persist.dao.UserCredentialsDao; +import com.cloud.bridge.service.controller.s3.ServiceProvider; import com.cloud.bridge.service.core.ec2.EC2AssociateAddress; import com.cloud.bridge.service.core.ec2.EC2AuthorizeRevokeSecurityGroup; import com.cloud.bridge.service.core.ec2.EC2CreateImage; diff --git a/awsapi/src/com/cloud/bridge/service/EC2SoapService.java b/awsapi/src/com/cloud/bridge/service/EC2SoapService.java index 20e3df2dcae..0547c90bd5a 100644 --- a/awsapi/src/com/cloud/bridge/service/EC2SoapService.java +++ b/awsapi/src/com/cloud/bridge/service/EC2SoapService.java @@ -18,6 +18,7 @@ package com.cloud.bridge.service; import org.apache.log4j.Logger; import com.amazon.ec2.*; +import com.cloud.bridge.service.controller.s3.ServiceProvider; public class EC2SoapService implements AmazonEC2SkeletonInterface { protected final static Logger logger = Logger.getLogger(EC2SoapService.class); diff --git a/awsapi/src/com/cloud/bridge/service/PWCBHandler.java b/awsapi/src/com/cloud/bridge/service/PWCBHandler.java deleted file mode 100644 index 58fbec66a88..00000000000 --- a/awsapi/src/com/cloud/bridge/service/PWCBHandler.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.cloud.bridge.service; - -import java.io.IOException; - -import javax.security.auth.callback.Callback; -import javax.security.auth.callback.CallbackHandler; -import javax.security.auth.callback.UnsupportedCallbackException; - -import org.apache.ws.security.WSPasswordCallback; - -public class PWCBHandler implements CallbackHandler { - - @SuppressWarnings("deprecation") - public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException { - for (int i = 0; i < callbacks.length; i++) { - WSPasswordCallback pwcb = (WSPasswordCallback)callbacks[i]; - String id = pwcb.getIdentifer(); - if ( "client".equals(id)) { - pwcb.setPassword("apache"); - } - else if("service".equals(id)) { - pwcb.setPassword("apache"); - } - } - } -} - diff --git a/awsapi/src/com/cloud/bridge/service/S3BucketAdapter.java b/awsapi/src/com/cloud/bridge/service/S3BucketAdapter.java deleted file mode 100644 index 8c38b514576..00000000000 --- a/awsapi/src/com/cloud/bridge/service/S3BucketAdapter.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.cloud.bridge.service; - -import java.io.InputStream; -import java.io.OutputStream; - -import javax.activation.DataHandler; - -import com.cloud.bridge.service.core.s3.S3MultipartPart; -import com.cloud.bridge.util.Tuple; - -/** - * @author Kelven Yang - */ -public interface S3BucketAdapter { - void createContainer(String mountedRoot, String bucket); - void deleteContainer(String mountedRoot, String bucket); - String getBucketFolderDir(String mountedRoot, String bucket); - String saveObject(InputStream is, String mountedRoot, String bucket, String fileName); - DataHandler loadObject(String mountedRoot, String bucket, String fileName); - DataHandler loadObjectRange(String mountedRoot, String bucket, String fileName, long startPos, long endPos); - void deleteObject(String mountedRoot, String bucket, String fileName); - Tuple concatentateObjects(String mountedRoot, String destBucket, String fileName, String sourceBucket, S3MultipartPart[] parts, OutputStream os); -} diff --git a/awsapi/src/com/cloud/bridge/service/S3FileSystemBucketAdapter.java b/awsapi/src/com/cloud/bridge/service/S3FileSystemBucketAdapter.java deleted file mode 100644 index 5937596eb11..00000000000 --- a/awsapi/src/com/cloud/bridge/service/S3FileSystemBucketAdapter.java +++ /dev/null @@ -1,245 +0,0 @@ -/* - * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.cloud.bridge.service; - -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.net.MalformedURLException; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; - -import javax.activation.DataHandler; -import javax.activation.DataSource; - -import org.apache.log4j.Logger; - -import com.cloud.bridge.service.core.s3.S3MultipartPart; -import com.cloud.bridge.service.exception.FileNotExistException; -import com.cloud.bridge.service.exception.InternalErrorException; -import com.cloud.bridge.service.exception.OutOfStorageException; -import com.cloud.bridge.util.FileRangeDataSource; -import com.cloud.bridge.util.StringHelper; -import com.cloud.bridge.util.Tuple; - -/** - * @author Kelven Yang - */ -public class S3FileSystemBucketAdapter implements S3BucketAdapter { - protected final static Logger logger = Logger.getLogger(S3FileSystemBucketAdapter.class); - - public S3FileSystemBucketAdapter() { - } - - @Override - public void createContainer(String mountedRoot, String bucket) { - - String dir = getBucketFolderDir(mountedRoot, bucket); - File container = new File(dir); - - if (!container.exists()) { - if (!container.mkdirs()) - throw new OutOfStorageException("Unable to create " + dir + " for bucket " + bucket); - } - } - - @Override - public void deleteContainer(String mountedRoot, String bucket) { - String dir = getBucketFolderDir(mountedRoot, bucket); - File path = new File(dir); - 
if(!deleteDirectory(path)) - throw new OutOfStorageException("Unable to delete " + dir + " for bucket " + bucket); - } - - @Override - public String getBucketFolderDir(String mountedRoot, String bucket) { - String bucketFolder = getBucketFolderName(bucket); - String dir; - String separator = ""+File.separatorChar; - if(!mountedRoot.endsWith(separator)) - dir = mountedRoot + separator + bucketFolder; - else - dir = mountedRoot + bucketFolder; - - return dir; - } - - @Override - public String saveObject(InputStream is, String mountedRoot, String bucket, String fileName) - { - FileOutputStream fos = null; - MessageDigest md5 = null; - - try { - md5 = MessageDigest.getInstance("MD5"); - } catch (NoSuchAlgorithmException e) { - logger.error("Unexpected exception " + e.getMessage(), e); - throw new InternalErrorException("Unable to get MD5 MessageDigest", e); - } - - File file = new File(getBucketFolderDir(mountedRoot, bucket) + File.separatorChar + fileName); - try { - // -> when versioning is off we need to rewrite the file contents - file.delete(); - file.createNewFile(); - - fos = new FileOutputStream(file); - byte[] buffer = new byte[4096]; - int len = 0; - while( (len = is.read(buffer)) > 0) { - fos.write(buffer, 0, len); - md5.update(buffer, 0, len); - } - return StringHelper.toHexString(md5.digest()); - - } - catch(IOException e) { - logger.error("Unexpected exception " + e.getMessage(), e); - throw new OutOfStorageException(e); - } - finally { - try { - if (null != fos) fos.close(); - } - catch( Exception e ) { - logger.error("Can't close FileOutputStream " + e.getMessage(), e); - } - } - } - - /** - * From a list of files (each being one part of the multipart upload), concatentate all files into a single - * object that can be accessed by normal S3 calls. This function could take a long time since a multipart is - * allowed to have upto 10,000 parts (each 5 gib long). 
Amazon defines that while this operation is in progress - * whitespace is sent back to the client inorder to keep the HTTP connection alive. - * - * @param mountedRoot - where both the source and dest buckets are located - * @param destBucket - resulting location of the concatenated objects - * @param fileName - resulting file name of the concatenated objects - * @param sourceBucket - special bucket used to save uploaded file parts - * @param parts - an array of file names in the sourceBucket - * @param client - if not null, then keep the servlet connection alive while this potentially long concatentation takes place - * @return Tuple with the first value the MD5 of the final object, and the second value the length of the final object - */ - @Override - public Tuple concatentateObjects(String mountedRoot, String destBucket, String fileName, String sourceBucket, S3MultipartPart[] parts, OutputStream client) - { - MessageDigest md5; - long totalLength = 0; - - try { - md5 = MessageDigest.getInstance("MD5"); - } catch (NoSuchAlgorithmException e) { - logger.error("Unexpected exception " + e.getMessage(), e); - throw new InternalErrorException("Unable to get MD5 MessageDigest", e); - } - - File file = new File(getBucketFolderDir(mountedRoot, destBucket) + File.separatorChar + fileName); - try { - // -> when versioning is off we need to rewrite the file contents - file.delete(); - file.createNewFile(); - - final FileOutputStream fos = new FileOutputStream(file); - byte[] buffer = new byte[4096]; - - // -> get the input stream for the next file part - for( int i=0; i < parts.length; i++ ) - { - DataHandler nextPart = loadObject( mountedRoot, sourceBucket, parts[i].getPath()); - InputStream is = nextPart.getInputStream(); - - int len = 0; - while( (len = is.read(buffer)) > 0) { - fos.write(buffer, 0, len); - md5.update(buffer, 0, len); - totalLength += len; - } - is.close(); - - // -> after each file write tell the client we are still here to keep connection alive - if 
(null != client) { - client.write( new String(" ").getBytes()); - client.flush(); - } - } - fos.close(); - return new Tuple(StringHelper.toHexString(md5.digest()), new Long(totalLength)); - } - catch(IOException e) { - logger.error("concatentateObjects unexpected exception " + e.getMessage(), e); - throw new OutOfStorageException(e); - } - } - - @Override - public DataHandler loadObject(String mountedRoot, String bucket, String fileName) { - File file = new File(getBucketFolderDir(mountedRoot, bucket) + File.separatorChar + fileName); - try { - return new DataHandler(file.toURL()); - } catch (MalformedURLException e) { - throw new FileNotExistException("Unable to open underlying object file"); - } - } - - @Override - public void deleteObject(String mountedRoot, String bucket, String fileName) { - String filePath = new String( getBucketFolderDir(mountedRoot, bucket) + File.separatorChar + fileName ); - File file = new File( filePath ); - if (!file.delete()) { - logger.error("file: " + filePath + ", f=" + file.isFile() + ", h=" + file.isHidden() + ", e=" + file.exists() + ", w=" + file.canWrite()); - throw new OutOfStorageException( "Unable to delete " + filePath + " for object deletion" ); - } - } - - @Override - public DataHandler loadObjectRange(String mountedRoot, String bucket, String fileName, long startPos, long endPos) { - File file = new File(getBucketFolderDir(mountedRoot, bucket) + File.separatorChar + fileName); - try { - DataSource ds = new FileRangeDataSource(file, startPos, endPos); - return new DataHandler(ds); - } catch (MalformedURLException e) { - throw new FileNotExistException("Unable to open underlying object file"); - } catch(IOException e) { - throw new FileNotExistException("Unable to open underlying object file"); - } - } - - public static boolean deleteDirectory(File path) { - if( path.exists() ) { - File[] files = path.listFiles(); - for(int i = 0; i < files.length; i++) { - if(files[i].isDirectory()) { - deleteDirectory(files[i]); - } 
else { - files[i].delete(); - } - } - } - return path.delete(); - } - - private String getBucketFolderName(String bucket) { - // temporary - String name = bucket.replace(' ', '_'); - name = bucket.replace('\\', '-'); - name = bucket.replace('/', '-'); - - return name; - } -} diff --git a/awsapi/src/com/cloud/bridge/service/S3SoapService.java b/awsapi/src/com/cloud/bridge/service/S3SoapService.java deleted file mode 100644 index 5732c8945fe..00000000000 --- a/awsapi/src/com/cloud/bridge/service/S3SoapService.java +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.cloud.bridge.service; - -import org.apache.axis2.AxisFault; -import org.apache.log4j.Logger; - -import com.amazon.s3.*; - -/** - * @author Kelven Yang - */ -public class S3SoapService implements AmazonS3SkeletonInterface { - protected final static Logger logger = Logger.getLogger(S3SoapService.class); - - public GetBucketLoggingStatusResponse getBucketLoggingStatus(GetBucketLoggingStatus getBucketLoggingStatus0) { - AmazonS3SkeletonInterface s3Service = ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class); - return s3Service.getBucketLoggingStatus(getBucketLoggingStatus0); - } - - public CopyObjectResponse copyObject(com.amazon.s3.CopyObject copyObject2) throws AxisFault { - AmazonS3SkeletonInterface s3Service = ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class); - return s3Service.copyObject(copyObject2); - } - - public GetBucketAccessControlPolicyResponse getBucketAccessControlPolicy ( - GetBucketAccessControlPolicy getBucketAccessControlPolicy4) { - AmazonS3SkeletonInterface s3Service = ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class); - return s3Service.getBucketAccessControlPolicy (getBucketAccessControlPolicy4); - } - - public ListBucketResponse listBucket (ListBucket listBucket6) { - AmazonS3SkeletonInterface s3Service = ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class); - return s3Service.listBucket (listBucket6); - } - - public PutObjectResponse putObject(PutObject putObject8) { - AmazonS3SkeletonInterface s3Service = ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class); - return s3Service.putObject(putObject8); - } - - public CreateBucketResponse createBucket (CreateBucket createBucket) { - AmazonS3SkeletonInterface s3Service = ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class); - return s3Service.createBucket(createBucket); - } - - public ListAllMyBucketsResponse 
listAllMyBuckets ( - ListAllMyBuckets listAllMyBuckets12) { - AmazonS3SkeletonInterface s3Service = ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class); - return s3Service.listAllMyBuckets (listAllMyBuckets12); - } - - public GetObjectResponse getObject(com.amazon.s3.GetObject getObject14) { - AmazonS3SkeletonInterface s3Service = ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class); - return s3Service.getObject(getObject14); - } - - public DeleteBucketResponse deleteBucket(DeleteBucket deleteBucket16) { - AmazonS3SkeletonInterface s3Service = ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class); - return s3Service.deleteBucket(deleteBucket16); - } - - public SetBucketLoggingStatusResponse setBucketLoggingStatus( - SetBucketLoggingStatus setBucketLoggingStatus18) { - AmazonS3SkeletonInterface s3Service = ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class); - return s3Service.setBucketLoggingStatus(setBucketLoggingStatus18); - } - - - public GetObjectAccessControlPolicyResponse getObjectAccessControlPolicy( - GetObjectAccessControlPolicy getObjectAccessControlPolicy20) { - AmazonS3SkeletonInterface s3Service = ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class); - return s3Service.getObjectAccessControlPolicy(getObjectAccessControlPolicy20); - } - - public DeleteObjectResponse deleteObject (DeleteObject deleteObject22) { - AmazonS3SkeletonInterface s3Service = ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class); - return s3Service.deleteObject (deleteObject22); - } - - public SetBucketAccessControlPolicyResponse setBucketAccessControlPolicy( - SetBucketAccessControlPolicy setBucketAccessControlPolicy24) { - AmazonS3SkeletonInterface s3Service = ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class); - return s3Service.setBucketAccessControlPolicy(setBucketAccessControlPolicy24); - } - - 
public SetObjectAccessControlPolicyResponse setObjectAccessControlPolicy( - SetObjectAccessControlPolicy setObjectAccessControlPolicy26) { - AmazonS3SkeletonInterface s3Service = ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class); - return s3Service.setObjectAccessControlPolicy(setObjectAccessControlPolicy26); - } - - public PutObjectInlineResponse putObjectInline (PutObjectInline putObjectInline28) { - AmazonS3SkeletonInterface s3Service = ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class); - return s3Service.putObjectInline (putObjectInline28); - } - - public GetObjectExtendedResponse getObjectExtended(GetObjectExtended getObjectExtended30) { - AmazonS3SkeletonInterface s3Service = ServiceProvider.getInstance().getServiceImpl(AmazonS3SkeletonInterface.class); - return s3Service.getObjectExtended(getObjectExtended30); - } -} diff --git a/awsapi/src/com/cloud/bridge/service/S3SoapServiceImpl.java b/awsapi/src/com/cloud/bridge/service/S3SoapServiceImpl.java deleted file mode 100644 index 5b2aa9e615c..00000000000 --- a/awsapi/src/com/cloud/bridge/service/S3SoapServiceImpl.java +++ /dev/null @@ -1,700 +0,0 @@ -/* - * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.cloud.bridge.service; - -import java.io.IOException; -import java.util.Calendar; - -import org.apache.axis2.AxisFault; -import org.apache.log4j.Logger; - -import com.amazon.s3.AccessControlList; -import com.amazon.s3.AccessControlPolicy; -import com.amazon.s3.AmazonS3SkeletonInterface; -import com.amazon.s3.CanonicalUser; -import com.amazon.s3.CopyObject; -import com.amazon.s3.CopyObjectResult; -import com.amazon.s3.Group; -import com.amazon.s3.CopyObjectResponse; -import com.amazon.s3.CreateBucket; -import com.amazon.s3.CreateBucketResponse; -import com.amazon.s3.CreateBucketResult; -import com.amazon.s3.DeleteBucket; -import com.amazon.s3.DeleteBucketResponse; -import com.amazon.s3.DeleteObject; -import com.amazon.s3.DeleteObjectResponse; -import com.amazon.s3.GetBucketAccessControlPolicy; -import com.amazon.s3.GetBucketAccessControlPolicyResponse; -import com.amazon.s3.GetBucketLoggingStatus; -import com.amazon.s3.GetBucketLoggingStatusResponse; -import com.amazon.s3.GetObject; -import com.amazon.s3.GetObjectAccessControlPolicy; -import com.amazon.s3.GetObjectAccessControlPolicyResponse; -import com.amazon.s3.GetObjectExtended; -import com.amazon.s3.GetObjectExtendedResponse; -import com.amazon.s3.GetObjectResponse; -import com.amazon.s3.GetObjectResult; -import com.amazon.s3.Grant; -import com.amazon.s3.Grantee; -import com.amazon.s3.ListAllMyBuckets; -import com.amazon.s3.ListAllMyBucketsEntry; -import com.amazon.s3.ListAllMyBucketsList; -import com.amazon.s3.ListAllMyBucketsResponse; -import com.amazon.s3.ListAllMyBucketsResult; -import com.amazon.s3.ListBucket; -import com.amazon.s3.ListBucketResponse; -import com.amazon.s3.ListBucketResult; -import com.amazon.s3.ListEntry; -import com.amazon.s3.MetadataDirective; -import com.amazon.s3.MetadataEntry; -import com.amazon.s3.Permission; -import com.amazon.s3.PrefixEntry; -import com.amazon.s3.PutObject; -import com.amazon.s3.PutObjectInline; -import com.amazon.s3.PutObjectInlineResponse; 
-import com.amazon.s3.PutObjectResponse; -import com.amazon.s3.PutObjectResult; -import com.amazon.s3.SetBucketAccessControlPolicy; -import com.amazon.s3.SetBucketAccessControlPolicyResponse; -import com.amazon.s3.SetBucketLoggingStatus; -import com.amazon.s3.SetBucketLoggingStatusResponse; -import com.amazon.s3.SetObjectAccessControlPolicy; -import com.amazon.s3.SetObjectAccessControlPolicyResponse; -import com.amazon.s3.Status; -import com.amazon.s3.StorageClass; -import com.cloud.bridge.model.SAcl; -import com.cloud.bridge.service.core.s3.S3AccessControlList; -import com.cloud.bridge.service.core.s3.S3AccessControlPolicy; -import com.cloud.bridge.service.core.s3.S3CanonicalUser; -import com.cloud.bridge.service.core.s3.S3ConditionalHeaders; -import com.cloud.bridge.service.core.s3.S3CopyObjectRequest; -import com.cloud.bridge.service.core.s3.S3CopyObjectResponse; -import com.cloud.bridge.service.core.s3.S3CreateBucketRequest; -import com.cloud.bridge.service.core.s3.S3CreateBucketResponse; -import com.cloud.bridge.service.core.s3.S3DeleteBucketRequest; -import com.cloud.bridge.service.core.s3.S3DeleteObjectRequest; -import com.cloud.bridge.service.core.s3.S3Engine; -import com.cloud.bridge.service.core.s3.S3GetBucketAccessControlPolicyRequest; -import com.cloud.bridge.service.core.s3.S3GetObjectAccessControlPolicyRequest; -import com.cloud.bridge.service.core.s3.S3GetObjectRequest; -import com.cloud.bridge.service.core.s3.S3GetObjectResponse; -import com.cloud.bridge.service.core.s3.S3Grant; -import com.cloud.bridge.service.core.s3.S3ListAllMyBucketsEntry; -import com.cloud.bridge.service.core.s3.S3ListAllMyBucketsRequest; -import com.cloud.bridge.service.core.s3.S3ListAllMyBucketsResponse; -import com.cloud.bridge.service.core.s3.S3ListBucketObjectEntry; -import com.cloud.bridge.service.core.s3.S3ListBucketPrefixEntry; -import com.cloud.bridge.service.core.s3.S3ListBucketRequest; -import com.cloud.bridge.service.core.s3.S3ListBucketResponse; -import 
com.cloud.bridge.service.core.s3.S3MetaDataEntry; -import com.cloud.bridge.service.core.s3.S3PutObjectInlineRequest; -import com.cloud.bridge.service.core.s3.S3PutObjectInlineResponse; -import com.cloud.bridge.service.core.s3.S3Response; -import com.cloud.bridge.service.core.s3.S3SetBucketAccessControlPolicyRequest; -import com.cloud.bridge.service.core.s3.S3SetObjectAccessControlPolicyRequest; -import com.cloud.bridge.service.exception.InternalErrorException; - -public class S3SoapServiceImpl implements AmazonS3SkeletonInterface { - protected final static Logger logger = Logger.getLogger(S3SoapServiceImpl.class); - - private S3Engine engine; - - public S3SoapServiceImpl(S3Engine engine) { - this.engine = engine; - } - - public GetBucketLoggingStatusResponse getBucketLoggingStatus( - GetBucketLoggingStatus getBucketLoggingStatus) { - throw new UnsupportedOperationException("Unsupported API"); - } - - public SetBucketLoggingStatusResponse setBucketLoggingStatus(SetBucketLoggingStatus setBucketLoggingStatus) { - throw new UnsupportedOperationException("Unsupported API"); - } - - public CopyObjectResponse copyObject(CopyObject copyObject) throws AxisFault { - S3CopyObjectRequest request = new S3CopyObjectRequest(); - - request.setSourceBucketName(copyObject.getSourceBucket()); - request.setSourceKey(copyObject.getSourceKey()); - request.setDestinationBucketName(copyObject.getDestinationBucket()); - request.setDestinationKey(copyObject.getDestinationKey()); - - MetadataDirective mdd = copyObject.getMetadataDirective(); - if (null != mdd) request.setDataDirective(mdd.getValue()); - - request.setMetaEntries(toEngineMetaEntries(copyObject.getMetadata())); - request.setAcl(toEngineAccessControlList(copyObject.getAccessControlList())); - - S3ConditionalHeaders conds = new S3ConditionalHeaders(); - conds.setModifiedSince(copyObject.getCopySourceIfModifiedSince()); - conds.setUnModifiedSince(copyObject.getCopySourceIfUnmodifiedSince()); - 
conds.setMatch(copyObject.getCopySourceIfMatch()); - conds.setNoneMatch(copyObject.getCopySourceIfNoneMatch()); - request.setConditions(conds); - - return toCopyObjectResponse(engine.handleRequest(request)); - } - - public GetBucketAccessControlPolicyResponse getBucketAccessControlPolicy( - GetBucketAccessControlPolicy getBucketAccessControlPolicy) { - // after authentication, we should setup user context - return toGetBucketAccessControlPolicyResponse(engine.handleRequest( - toEngineGetBucketAccessControlPolicyRequest(getBucketAccessControlPolicy))); - } - - private S3GetBucketAccessControlPolicyRequest toEngineGetBucketAccessControlPolicyRequest( - GetBucketAccessControlPolicy getBucketAccessControlPolicy) { - S3GetBucketAccessControlPolicyRequest request = new S3GetBucketAccessControlPolicyRequest(); - - request.setAccessKey(getBucketAccessControlPolicy.getAWSAccessKeyId()); - request.setRequestTimestamp(getBucketAccessControlPolicy.getTimestamp()); - request.setSignature(getBucketAccessControlPolicy.getSignature()); - request.setBucketName(getBucketAccessControlPolicy.getBucket()); - return request; - } - - public static GetBucketAccessControlPolicyResponse toGetBucketAccessControlPolicyResponse(S3AccessControlPolicy policy) { - GetBucketAccessControlPolicyResponse response = new GetBucketAccessControlPolicyResponse(); - response.setGetBucketAccessControlPolicyResponse(toAccessControlPolicy(policy)); - return response; - } - - public SetBucketAccessControlPolicyResponse setBucketAccessControlPolicy(SetBucketAccessControlPolicy setBucketAccessControlPolicy) { - S3SetBucketAccessControlPolicyRequest request = new S3SetBucketAccessControlPolicyRequest(); - request.setAccessKey(setBucketAccessControlPolicy.getAWSAccessKeyId()); - request.setRequestTimestamp(setBucketAccessControlPolicy.getTimestamp()); - request.setSignature(setBucketAccessControlPolicy.getSignature()); - request.setBucketName(setBucketAccessControlPolicy.getBucket()); - 
request.setAcl(toEngineAccessControlList(setBucketAccessControlPolicy.getAccessControlList())); - - S3Response basicResponse = engine.handleRequest(request); - SetBucketAccessControlPolicyResponse response = new SetBucketAccessControlPolicyResponse(); - return response; - } - - public ListBucketResponse listBucket (ListBucket listBucket) { - // after authentication, we should setup user context - return toListBucketResponse(engine.listBucketContents(toEngineListBucketRequest(listBucket), false)); - } - - private S3ListBucketRequest toEngineListBucketRequest(ListBucket listBucket) { - S3ListBucketRequest request = new S3ListBucketRequest(); - - request.setAccessKey(listBucket.getAWSAccessKeyId()); - request.setRequestTimestamp(listBucket.getTimestamp()); - request.setSignature(listBucket.getSignature()); - - request.setBucketName(listBucket.getBucket()); - request.setDelimiter(listBucket.getDelimiter()); - request.setMarker(listBucket.getMarker()); - request.setMaxKeys(listBucket.getMaxKeys()); - request.setPrefix(listBucket.getPrefix()); - return request; - } - - public static ListBucketResponse toListBucketResponse(S3ListBucketResponse engineResponse) { - ListBucketResponse response = new ListBucketResponse(); - ListBucketResult result = new ListBucketResult(); - result.setName(engineResponse.getBucketName()); - result.setDelimiter(engineResponse.getDelimiter()); - result.setPrefix(engineResponse.getPrefix()); - result.setMarker(engineResponse.getMarker()); - result.setMaxKeys(engineResponse.getMaxKeys()); - result.setIsTruncated(engineResponse.isTruncated()); - result.setNextMarker(engineResponse.getNextMarker()); - result.setCommonPrefixes(toPrefixEntry(engineResponse.getCommonPrefixes())); - result.setContents(toListEntry(engineResponse.getContents())); - response.setListBucketResponse(result); - return response; - } - - private static PrefixEntry[] toPrefixEntry(S3ListBucketPrefixEntry[] engineEntries) { - if(engineEntries != null) { - PrefixEntry[] entries = 
new PrefixEntry[engineEntries.length]; - for(int i = 0; i < engineEntries.length; i++) { - entries[i] = new PrefixEntry(); - entries[i].setPrefix(engineEntries[i].getPrefix()); - } - - return entries; - } - return null; - } - - private static ListEntry[] toListEntry(S3ListBucketObjectEntry[] engineEntries) { - if(engineEntries != null) { - ListEntry[] entries = new ListEntry[engineEntries.length]; - for(int i = 0; i < engineEntries.length; i++) { - entries[i] = new ListEntry(); - entries[i].setETag(engineEntries[i].getETag()); - entries[i].setKey(engineEntries[i].getKey()); - entries[i].setLastModified(engineEntries[i].getLastModified()); - entries[i].setSize(engineEntries[i].getSize()); - entries[i].setStorageClass(StorageClass.STANDARD); - - CanonicalUser owner = new CanonicalUser(); - owner.setID(engineEntries[i].getOwnerCanonicalId()); - owner.setDisplayName(engineEntries[i].getOwnerDisplayName()); - entries[i].setOwner(owner); - } - return entries; - } - - return null; - } - - public PutObjectResponse putObject(PutObject putObject) { - //TODO : fill this with the necessary business logic - throw new UnsupportedOperationException("Please implement " + this.getClass().getName() + "#putObject"); - } - - public CreateBucketResponse createBucket (CreateBucket createBucket) { - return toCreateBucketResponse(engine.handleRequest(toEngineCreateBucketRequest(createBucket))); - } - - private S3CreateBucketRequest toEngineCreateBucketRequest(CreateBucket createBucket) { - S3CreateBucketRequest request = new S3CreateBucketRequest(); - request.setAccessKey(createBucket.getAWSAccessKeyId()); - request.setRequestTimestamp(createBucket.getTimestamp()); - request.setSignature(createBucket.getSignature()); - request.setBucketName(createBucket.getBucket()); - request.setAcl(toEngineAccessControlList(createBucket.getAccessControlList())); - return request; - } - - private CreateBucketResponse toCreateBucketResponse(S3CreateBucketResponse engineResponse) { - CreateBucketResponse 
response = new CreateBucketResponse(); - CreateBucketResult result = new CreateBucketResult(); - result.setBucketName(engineResponse.getBucketName()); - response.setCreateBucketReturn(result); - return response; - } - - public ListAllMyBucketsResponse listAllMyBuckets (ListAllMyBuckets listAllMyBuckets) { - return toListAllMyBucketsResponse(engine.handleRequest(toEngineListAllMyBucketsRequest(listAllMyBuckets))); - } - - private S3ListAllMyBucketsRequest toEngineListAllMyBucketsRequest(ListAllMyBuckets listAllMyBuckets) { - S3ListAllMyBucketsRequest request = new S3ListAllMyBucketsRequest(); - request.setAccessKey(listAllMyBuckets.getAWSAccessKeyId()); - request.setRequestTimestamp(listAllMyBuckets.getTimestamp()); - request.setSignature(listAllMyBuckets.getSignature()); - return request; - } - - public static ListAllMyBucketsResponse toListAllMyBucketsResponse(S3ListAllMyBucketsResponse engineResponse) { - ListAllMyBucketsResponse response = new ListAllMyBucketsResponse(); - ListAllMyBucketsResult result = new ListAllMyBucketsResult(); - ListAllMyBucketsEntry[] entries = null; - - S3CanonicalUser ownerEngine = engineResponse.getOwner(); - CanonicalUser owner = new CanonicalUser(); - owner.setID(ownerEngine.getID()); - owner.setDisplayName(ownerEngine.getDisplayName()); - result.setOwner(owner); - S3ListAllMyBucketsEntry[] engineEntries = engineResponse.getBuckets(); - if (engineEntries != null) { - entries = new ListAllMyBucketsEntry[engineEntries.length]; - for(int i = 0; i < engineEntries.length; i++) { - entries[i] = new ListAllMyBucketsEntry(); - entries[i].setName(engineEntries[i].getName()); - entries[i].setCreationDate(engineEntries[i].getCreationDate()); - } - - ListAllMyBucketsList list = new ListAllMyBucketsList(); - list.setBucket(entries); - result.setBuckets(list); - } - response.setListAllMyBucketsResponse(result); - return response; - } - - public DeleteBucketResponse deleteBucket(DeleteBucket deleteBucket) { - return 
toDeleteBucketResponse(engine.handleRequest(toEngineDeleteBucketRequest(deleteBucket))); - } - - private S3DeleteBucketRequest toEngineDeleteBucketRequest(DeleteBucket deleteBucket) { - S3DeleteBucketRequest request = new S3DeleteBucketRequest(); - request.setAccessKey(deleteBucket.getAWSAccessKeyId()); - request.setRequestTimestamp(deleteBucket.getTimestamp()); - request.setSignature(deleteBucket.getSignature()); - request.setBucketName(deleteBucket.getBucket()); - return request; - } - - private DeleteBucketResponse toDeleteBucketResponse(S3Response engineResponse) { - DeleteBucketResponse response = new DeleteBucketResponse(); - Status status = new Status(); - status.setCode(engineResponse.getResultCode()); - status.setDescription(engineResponse.getResultDescription()); - response.setDeleteBucketResponse(status); - return response; - } - - public GetObjectResponse getObject(com.amazon.s3.GetObject getObject) { - return toGetObjectResponse(engine.handleRequest(toEngineGetObjectRequest(getObject))); - } - - public GetObjectExtendedResponse getObjectExtended(GetObjectExtended getObjectExtended) { - return toGetObjectExtendedResponse(engine.handleRequest(toEngineGetObjectRequest(getObjectExtended))); - } - - private S3GetObjectRequest toEngineGetObjectRequest(GetObject getObject) - { - S3GetObjectRequest request = new S3GetObjectRequest(); - - request.setAccessKey(getObject.getAWSAccessKeyId()); - request.setRequestTimestamp(getObject.getTimestamp()); - request.setSignature(getObject.getSignature()); - request.setBucketName(getObject.getBucket()); - request.setKey(getObject.getKey()); - request.setReturnData(getObject.getGetData()); - request.setReturnMetadata(getObject.getGetMetadata()); - request.setInlineData(getObject.getInlineData()); - return request; - } - - private S3GetObjectRequest toEngineGetObjectRequest(GetObjectExtended getObjectExtended) { - S3GetObjectRequest request = new S3GetObjectRequest(); - 
request.setAccessKey(getObjectExtended.getAWSAccessKeyId()); - request.setRequestTimestamp(getObjectExtended.getTimestamp()); - request.setSignature(getObjectExtended.getSignature()); - request.setBucketName(getObjectExtended.getBucket()); - request.setKey(getObjectExtended.getKey()); - request.setReturnData(getObjectExtended.getGetData()); - request.setReturnMetadata(getObjectExtended.getGetMetadata()); - request.setInlineData(getObjectExtended.getInlineData()); - - S3ConditionalHeaders conds = new S3ConditionalHeaders(); - conds.setModifiedSince(getObjectExtended.getIfModifiedSince()); - conds.setUnModifiedSince(getObjectExtended.getIfUnmodifiedSince()); - conds.setMatch(getObjectExtended.getIfMatch()); - conds.setNoneMatch(getObjectExtended.getIfNoneMatch()); - request.setConditions(conds); - - request.setByteRangeStart(getObjectExtended.getByteRangeStart()); - request.setByteRangeEnd(getObjectExtended.getByteRangeEnd()); - request.setReturnCompleteObjectOnConditionFailure(getObjectExtended.getReturnCompleteObjectOnConditionFailure()); - return request; - } - - private GetObjectResponse toGetObjectResponse(S3GetObjectResponse engineResponse) { - GetObjectResponse response = new GetObjectResponse(); - int resultCode = engineResponse.getResultCode(); - - GetObjectResult result = new GetObjectResult(); - Status param1 = new Status(); - param1.setCode( resultCode); - param1.setDescription( engineResponse.getResultDescription()); - result.setStatus( param1 ); - - if ( 200 == resultCode ) - { - result.setData(engineResponse.getData()); - result.setETag( engineResponse.getETag()); - result.setMetadata(toMetadataEntry(engineResponse.getMetaEntries())); - result.setLastModified( engineResponse.getLastModified()); - } - else - { result.setETag( "" ); - result.setLastModified( Calendar.getInstance()); - } - - response.setGetObjectResponse(result); - return response; - } - - private GetObjectExtendedResponse toGetObjectExtendedResponse(S3GetObjectResponse engineResponse) { 
- GetObjectExtendedResponse response = new GetObjectExtendedResponse(); - int resultCode = engineResponse.getResultCode(); - - GetObjectResult result = new GetObjectResult(); - Status param1 = new Status(); - param1.setCode( resultCode ); - param1.setDescription( engineResponse.getResultDescription()); - result.setStatus( param1 ); - - if ( 200 == resultCode || 206 == resultCode ) - { - result.setData(engineResponse.getData()); - result.setETag( engineResponse.getETag()); - result.setMetadata(toMetadataEntry(engineResponse.getMetaEntries())); - result.setLastModified( engineResponse.getLastModified()); - } - else - { result.setETag( "" ); - result.setLastModified( Calendar.getInstance()); - } - - response.setGetObjectResponse(result); - return response; - } - - private MetadataEntry[] toMetadataEntry(S3MetaDataEntry[] engineEntries) { - if(engineEntries != null) { - MetadataEntry[] entries = new MetadataEntry[engineEntries.length]; - for(int i = 0; i < engineEntries.length; i++) { - entries[i] = new MetadataEntry(); - entries[i].setName(engineEntries[i].getName()); - entries[i].setValue(engineEntries[i].getValue()); - } - return entries; - } - return null; - } - - public GetObjectAccessControlPolicyResponse getObjectAccessControlPolicy( - GetObjectAccessControlPolicy getObjectAccessControlPolicy) { - return toGetObjectAccessControlPolicyResponse(engine.handleRequest( - toEngineGetObjectAccessControlPolicyRequest(getObjectAccessControlPolicy))); - } - - private S3GetObjectAccessControlPolicyRequest toEngineGetObjectAccessControlPolicyRequest( - GetObjectAccessControlPolicy getObjectAccessControlPolicy) { - S3GetObjectAccessControlPolicyRequest request = new S3GetObjectAccessControlPolicyRequest(); - - request.setAccessKey(getObjectAccessControlPolicy.getAWSAccessKeyId()); - request.setRequestTimestamp(getObjectAccessControlPolicy.getTimestamp()); - request.setSignature(getObjectAccessControlPolicy.getSignature()); - 
request.setBucketName(getObjectAccessControlPolicy.getBucket()); - request.setKey(getObjectAccessControlPolicy.getKey()); - return request; - } - - public static GetObjectAccessControlPolicyResponse toGetObjectAccessControlPolicyResponse(S3AccessControlPolicy policy) { - GetObjectAccessControlPolicyResponse response = new GetObjectAccessControlPolicyResponse(); - response.setGetObjectAccessControlPolicyResponse(toAccessControlPolicy(policy)); - return response; - } - - private static AccessControlPolicy toAccessControlPolicy(S3AccessControlPolicy enginePolicy) { - AccessControlPolicy policy = new AccessControlPolicy(); - CanonicalUser owner = new CanonicalUser(); - owner.setID(enginePolicy.getOwner().getID()); - owner.setDisplayName(enginePolicy.getOwner().getDisplayName()); - policy.setOwner(owner); - - AccessControlList acl = new AccessControlList(); - acl.setGrant(toGrants(enginePolicy.getGrants())); - policy.setAccessControlList(acl); - return policy; - } - - public DeleteObjectResponse deleteObject (DeleteObject deleteObject) { - return toDeleteObjectResponse(engine.handleRequest(toEngineDeleteObjectRequest(deleteObject))); - } - - private S3DeleteObjectRequest toEngineDeleteObjectRequest(DeleteObject deleteObject) { - S3DeleteObjectRequest request = new S3DeleteObjectRequest(); - request.setAccessKey(deleteObject.getAWSAccessKeyId()); - request.setRequestTimestamp(deleteObject.getTimestamp()); - request.setSignature(deleteObject.getSignature()); - request.setBucketName(deleteObject.getBucket()); - request.setKey(deleteObject.getKey()); - return request; - } - - private DeleteObjectResponse toDeleteObjectResponse(S3Response engineResponse) { - DeleteObjectResponse response = new DeleteObjectResponse(); - Status status = new Status(); - status.setCode(engineResponse.getResultCode()); - status.setDescription(engineResponse.getResultDescription()); - response.setDeleteObjectResponse(status); - return response; - } - - public SetObjectAccessControlPolicyResponse 
setObjectAccessControlPolicy(SetObjectAccessControlPolicy setObjectAccessControlPolicy) - { - S3SetObjectAccessControlPolicyRequest request = new S3SetObjectAccessControlPolicyRequest(); - request.setAccessKey(setObjectAccessControlPolicy.getAWSAccessKeyId()); - request.setRequestTimestamp(setObjectAccessControlPolicy.getTimestamp()); - request.setSignature(setObjectAccessControlPolicy.getSignature()); - request.setBucketName(setObjectAccessControlPolicy.getBucket()); - request.setKey(setObjectAccessControlPolicy.getKey()); - request.setAcl(toEngineAccessControlList(setObjectAccessControlPolicy.getAccessControlList())); - - engine.handleRequest(request); - SetObjectAccessControlPolicyResponse response = new SetObjectAccessControlPolicyResponse(); - return response; - } - - public PutObjectInlineResponse putObjectInline (PutObjectInline putObjectInline) { - return toPutObjectInlineResponse(engine.handleRequest(toEnginePutObjectInlineRequest(putObjectInline))); - } - - private S3PutObjectInlineRequest toEnginePutObjectInlineRequest(PutObjectInline putObjectInline) { - S3PutObjectInlineRequest request = new S3PutObjectInlineRequest(); - request.setAccessKey(putObjectInline.getAWSAccessKeyId()); - request.setRequestTimestamp(putObjectInline.getTimestamp()); - request.setSignature(putObjectInline.getSignature()); - request.setBucketName(putObjectInline.getBucket()); - request.setContentLength(putObjectInline.getContentLength()); - request.setKey(putObjectInline.getKey()); - request.setData(putObjectInline.getData()); - request.setMetaEntries(toEngineMetaEntries(putObjectInline.getMetadata())); - request.setAcl(toEngineAccessControlList(putObjectInline.getAccessControlList())); - return request; - } - - private S3MetaDataEntry[] toEngineMetaEntries(MetadataEntry[] metaEntries) { - if(metaEntries != null) { - S3MetaDataEntry[] engineMetaEntries = new S3MetaDataEntry[metaEntries.length]; - for(int i = 0; i < metaEntries.length; i++) { - engineMetaEntries[i] = new 
S3MetaDataEntry(); - engineMetaEntries[i].setName(metaEntries[i].getName()); - engineMetaEntries[i].setValue(metaEntries[i].getValue()); - } - return engineMetaEntries; - } - return null; - } - - private S3AccessControlList toEngineAccessControlList(AccessControlList acl) - { - if (acl == null) return null; - - S3AccessControlList engineAcl = new S3AccessControlList(); - - Grant[] grants = acl.getGrant(); - if (grants != null) - { - for (Grant grant: grants) - { - S3Grant engineGrant = new S3Grant(); - - Grantee grantee = grant.getGrantee(); - if (grantee instanceof CanonicalUser) - { - engineGrant.setGrantee(SAcl.GRANTEE_USER); - engineGrant.setCanonicalUserID(((CanonicalUser)grantee).getID()); - } - else if (grantee instanceof Group) - { - Group temp = (Group)grantee; - String uri = temp.getURI(); - if ( uri.equalsIgnoreCase( "http://acs.amazonaws.com/groups/global/AllUsers" )) { - // -> this allows all public unauthenticated access based on permission given - engineGrant.setGrantee(SAcl.GRANTEE_ALLUSERS); - engineGrant.setCanonicalUserID( "*" ); - } - else if (uri.equalsIgnoreCase( "http://acs.amazonaws.com/groups/global/Authenticated" )) { - // -> this allows any authenticated user access based on permission given - engineGrant.setGrantee(SAcl.GRANTEE_AUTHENTICATED); - engineGrant.setCanonicalUserID( "A" ); - } - else throw new UnsupportedOperationException("Unsupported grantee group URI: " + uri ); - - } - else throw new UnsupportedOperationException("Unsupported grantee type: " + grantee.getClass().getCanonicalName()); - - Permission permission = grant.getPermission(); - String permissionValue = permission.getValue(); - if(permissionValue.equalsIgnoreCase("READ")) { - engineGrant.setPermission(SAcl.PERMISSION_READ); - } else if(permissionValue.equalsIgnoreCase("WRITE")) { - engineGrant.setPermission(SAcl.PERMISSION_WRITE); - } else if(permissionValue.equalsIgnoreCase("READ_ACP")) { - engineGrant.setPermission(SAcl.PERMISSION_READ_ACL); - } else 
if(permissionValue.equalsIgnoreCase("WRITE_ACP")) { - engineGrant.setPermission(SAcl.PERMISSION_WRITE_ACL); - } else if(permissionValue.equalsIgnoreCase("FULL_CONTROL")) { - engineGrant.setPermission(SAcl.PERMISSION_FULL); - } else { - throw new UnsupportedOperationException("Unsupported permission: " + permissionValue); - } - engineAcl.addGrant(engineGrant); - } - } - return engineAcl; - } - - private static Grant[] toGrants(S3Grant[] engineGrants) { - Grantee grantee = null; - Grant[] grants = null; - - if (engineGrants != null && 0 < engineGrants.length) - { - grants = new Grant[engineGrants.length]; - for(int i = 0; i < engineGrants.length; i++) - { - grants[i] = new Grant(); - - switch( engineGrants[i].getGrantee()) { - case SAcl.GRANTEE_USER : - grantee = new CanonicalUser(); - ((CanonicalUser)grantee).setID(engineGrants[i].getCanonicalUserID()); - ((CanonicalUser)grantee).setDisplayName("TODO"); - grants[i].setGrantee(grantee); - break; - - case SAcl.GRANTEE_ALLUSERS: - grantee = new Group(); - ((Group)grantee).setURI( "http://acs.amazonaws.com/groups/global/AllUsers" ); - grants[i].setGrantee(grantee); - break; - - case SAcl.GRANTEE_AUTHENTICATED: - grantee = new Group(); - ((Group)grantee).setURI( "http://acs.amazonaws.com/groups/global/Authenticated" ); - grants[i].setGrantee(grantee); - break; - - default : - throw new InternalErrorException("Unsupported grantee type"); - } - - - switch( engineGrants[i].getPermission()) { - case SAcl.PERMISSION_READ: grants[i].setPermission(Permission.READ); break; - case SAcl.PERMISSION_WRITE: grants[i].setPermission(Permission.WRITE); break; - case SAcl.PERMISSION_READ_ACL: grants[i].setPermission(Permission.READ_ACP); break; - case SAcl.PERMISSION_WRITE_ACL: grants[i].setPermission(Permission.WRITE_ACP); break; - case SAcl.PERMISSION_FULL: grants[i].setPermission(Permission.FULL_CONTROL); break; - } - } - return grants; - } - return null; - } - - private PutObjectInlineResponse 
toPutObjectInlineResponse(S3PutObjectInlineResponse engineResponse) { - PutObjectInlineResponse response = new PutObjectInlineResponse(); - - PutObjectResult result = new PutObjectResult(); - result.setETag(engineResponse.getETag()); - result.setLastModified(engineResponse.getLastModified()); - response.setPutObjectInlineResponse(result); - return response; - } - - public static CopyObjectResponse toCopyObjectResponse(S3CopyObjectResponse engineResponse) throws AxisFault { - CopyObjectResponse response = new CopyObjectResponse(); - int resultCode = engineResponse.getResultCode(); - - CopyObjectResult result = new CopyObjectResult(); - if ( 300 <= resultCode ) - { - String description = engineResponse.getResultDescription(); - throw new AxisFault( "" + resultCode, (null == description ? "" : description)); - } - - result.setETag( "\"" + engineResponse.getETag() + "\"" ); - result.setLastModified(engineResponse.getLastModified()); - response.setCopyObjectResult(result); - return response; - } -} diff --git a/awsapi/src/com/cloud/bridge/service/ServiceProvider.java b/awsapi/src/com/cloud/bridge/service/ServiceProvider.java deleted file mode 100644 index 3f154e76eda..00000000000 --- a/awsapi/src/com/cloud/bridge/service/ServiceProvider.java +++ /dev/null @@ -1,358 +0,0 @@ -/** - * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved - * - * This software is licensed under the GNU General Public License v3 or later. - * - * It is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or any later version. - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - */ -package com.cloud.bridge.service; - - -import java.io.File; -import java.io.FileInputStream; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.lang.reflect.InvocationHandler; -import java.lang.reflect.Method; -import java.lang.reflect.Proxy; -import java.net.InetAddress; -import java.sql.SQLException; -import java.util.HashMap; -import java.util.Map; -import java.util.Properties; -import java.util.Timer; -import java.util.TimerTask; - -import org.apache.axis2.AxisFault; -import org.apache.log4j.Logger; -import org.apache.log4j.xml.DOMConfigurator; -import org.hibernate.SessionException; - -import com.amazon.s3.AmazonS3SkeletonInterface; -import com.amazon.ec2.AmazonEC2SkeletonInterface; -import com.cloud.bridge.model.MHost; -import com.cloud.bridge.model.SHost; -import com.cloud.bridge.model.UserCredentials; -import com.cloud.bridge.persist.PersistContext; -import com.cloud.bridge.persist.PersistException; -import com.cloud.bridge.persist.dao.MHostDao; -import com.cloud.bridge.persist.dao.SHostDao; -import com.cloud.bridge.persist.dao.UserCredentialsDao; -import com.cloud.bridge.service.core.ec2.EC2Engine; -import com.cloud.bridge.service.core.s3.S3BucketPolicy; -import com.cloud.bridge.service.core.s3.S3Engine; -import com.cloud.bridge.service.exception.ConfigurationException; -import com.cloud.bridge.util.ConfigurationHelper; -import com.cloud.bridge.util.DateHelper; -import com.cloud.bridge.util.NetHelper; -import com.cloud.bridge.util.Tuple; - -/** - * @author Kelven Yang - */ -public class ServiceProvider { - protected final static Logger logger = Logger.getLogger(ServiceProvider.class); - - public final static long HEARTBEAT_INTERVAL = 10000; - - private static ServiceProvider instance; - - private Map, Object> serviceMap = new HashMap, Object>(); - private Timer timer = new Timer(); - private MHost 
mhost; - private Properties properties; - private boolean useSubDomain = false; // use DNS sub domain for bucket name - private String serviceEndpoint = null; - private String multipartDir = null; // illegal bucket name used as a folder for storing multiparts - private String masterDomain = ".s3.amazonaws.com"; - private S3Engine engine; - private EC2Engine EC2_engine = null; - - // -> cache Bucket Policies here so we don't have to load from db on every access - private Map policyMap = new HashMap(); - - protected ServiceProvider() throws IOException { - // register service implementation object - engine = new S3Engine(); - EC2_engine = new EC2Engine(); - serviceMap.put(AmazonS3SkeletonInterface.class, new S3SoapServiceImpl(engine)); - serviceMap.put(AmazonEC2SkeletonInterface.class, new EC2SoapServiceImpl(EC2_engine)); - } - - public synchronized static ServiceProvider getInstance() { - if(instance == null) - { - try { - instance = new ServiceProvider(); - instance.initialize(); - PersistContext.commitTransaction(); - } catch(Throwable e) { - logger.error("Unexpected exception " + e.getMessage(), e); - } finally { - PersistContext.closeSession(); - } - } - return instance; - } - - public long getManagementHostId() { - // we want to limit mhost within its own session, id of the value will be returned - long mhostId = 0; - if(mhost != null) - mhostId = mhost.getId() != null ? mhost.getId().longValue() : 0L; - return mhostId; - } - - /** - * We return a tuple to distinguish between two cases: - * (1) there is no entry in the map for bucketName, and (2) there is a null entry - * in the map for bucketName. In case 2, the database was inspected for the - * bucket policy but it had none so we remember it here to reduce database lookups. - * @param bucketName - * @return Integer in the tuple means: -1 if no policy defined for the bucket, 0 if one defined - * even if its set at null. 
- */ - public Tuple getBucketPolicy(String bucketName) { - - if (policyMap.containsKey( bucketName )) { - S3BucketPolicy policy = policyMap.get( bucketName ); - return new Tuple( policy, 0 ); - } - else return new Tuple( null, -1 ); - } - - /** - * The policy parameter can be set to null, which means that there is no policy - * for the bucket so a database lookup is not necessary. - * - * @param bucketName - * @param policy - */ - public void setBucketPolicy(String bucketName, S3BucketPolicy policy) { - policyMap.put(bucketName, policy); - } - - public void deleteBucketPolicy(String bucketName) { - policyMap.remove(bucketName); - } - - public S3Engine getS3Engine() { - return engine; - } - - public EC2Engine getEC2Engine() { - return EC2_engine; - } - - public String getMasterDomain() { - return masterDomain; - } - - public boolean getUseSubDomain() { - return useSubDomain; - } - - public String getServiceEndpoint() { - return serviceEndpoint; - } - - public String getMultipartDir() { - return multipartDir; - } - - public Properties getStartupProperties() { - return properties; - } - - public UserInfo getUserInfo(String accessKey) - throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException { - UserInfo info = new UserInfo(); - - UserCredentialsDao credentialDao = new UserCredentialsDao(); - UserCredentials cloudKeys = credentialDao.getByAccessKey( accessKey ); - if ( null == cloudKeys ) { - logger.debug( accessKey + " is not defined in the S3 service - call SetUserKeys" ); - return null; - } else { - info.setAccessKey( accessKey ); - info.setSecretKey( cloudKeys.getSecretKey()); - info.setCanonicalUserId(accessKey); - info.setDescription( "S3 REST request" ); - return info; - } - } - - protected void initialize() { - if(logger.isInfoEnabled()) - logger.info("Initializing ServiceProvider..."); - - File file = ConfigurationHelper.findConfigurationFile("log4j-cloud.xml"); - if(file != null) { - System.out.println("Log4j configuration 
from : " + file.getAbsolutePath()); - DOMConfigurator.configureAndWatch(file.getAbsolutePath(), 10000); - } else { - System.out.println("Configure log4j with default properties"); - } - - loadStartupProperties(); - String hostKey = properties.getProperty("host.key"); - if(hostKey == null) { - InetAddress inetAddr = NetHelper.getFirstNonLoopbackLocalInetAddress(); - if(inetAddr != null) - hostKey = NetHelper.getMacAddress(inetAddr); - } - if(hostKey == null) - throw new ConfigurationException("Please configure host.key property in cloud-bridge.properites"); - String host = properties.getProperty("host"); - if(host == null) - host = NetHelper.getHostName(); - - if(properties.get("bucket.dns") != null && - ((String)properties.get("bucket.dns")).equalsIgnoreCase("true")) { - useSubDomain = true; - } - - serviceEndpoint = (String)properties.get("serviceEndpoint"); - masterDomain = new String( "." + serviceEndpoint ); - - setupHost(hostKey, host); - - // we will commit and start a new transaction to allow host info be flushed to DB - PersistContext.flush(); - - String localStorageRoot = properties.getProperty("storage.root"); - if (localStorageRoot != null) setupLocalStorage(localStorageRoot); - - multipartDir = properties.getProperty("storage.multipartDir"); - - timer.schedule(getHeartbeatTask(), HEARTBEAT_INTERVAL, HEARTBEAT_INTERVAL); - - if(logger.isInfoEnabled()) - logger.info("ServiceProvider initialized"); - } - - private void loadStartupProperties() { - File propertiesFile = ConfigurationHelper.findConfigurationFile("cloud-bridge.properties"); - properties = new Properties(); - if(propertiesFile != null) { - try { - properties.load(new FileInputStream(propertiesFile)); - } catch (FileNotFoundException e) { - logger.warn("Unable to open properties file: " + propertiesFile.getAbsolutePath(), e); - } catch (IOException e) { - logger.warn("Unable to read properties file: " + propertiesFile.getAbsolutePath(), e); - } - - logger.info("Use startup properties file: " + 
propertiesFile.getAbsolutePath()); - } else { - if(logger.isInfoEnabled()) - logger.info("Startup properties is not found."); - } - } - - private TimerTask getHeartbeatTask() { - return new TimerTask() { - - @Override - public void run() { - try { - MHostDao mhostDao = new MHostDao(); - mhost.setLastHeartbeatTime(DateHelper.currentGMTTime()); - mhostDao.update(mhost); - PersistContext.commitTransaction(); - } catch(Throwable e){ - logger.error("Unexpected exception " + e.getMessage(), e); - } finally { - PersistContext.closeSession(); - } - } - }; - } - - private void setupHost(String hostKey, String host) { - MHostDao mhostDao = new MHostDao(); - mhost = mhostDao.getByHostKey(hostKey); - if(mhost == null) { - mhost = new MHost(); - mhost.setHostKey(hostKey); - mhost.setHost(host); - mhost.setLastHeartbeatTime(DateHelper.currentGMTTime()); - mhostDao.save(mhost); - } else { - mhost.setHost(host); - mhostDao.update(mhost); - } - } - - private void setupLocalStorage(String storageRoot) { - SHostDao shostDao = new SHostDao(); - SHost shost = shostDao.getLocalStorageHost(mhost.getId(), storageRoot); - if(shost == null) { - shost = new SHost(); - shost.setMhost(mhost); - mhost.getLocalSHosts().add(shost); - shost.setHostType(SHost.STORAGE_HOST_TYPE_LOCAL); - shost.setHost(NetHelper.getHostName()); - shost.setExportRoot(storageRoot); - PersistContext.getSession().save(shost); - } - } - - public void shutdown() { - timer.cancel(); - - if(logger.isInfoEnabled()) - logger.info("ServiceProvider stopped"); - } - - @SuppressWarnings("unchecked") - private static T getProxy(Class serviceInterface, final T serviceObject) { - return (T) Proxy.newProxyInstance(serviceObject.getClass().getClassLoader(), - new Class[] { serviceInterface }, - new InvocationHandler() { - public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { - Object result = null; - try { - result = method.invoke(serviceObject, args); - PersistContext.commitTransaction(); - 
PersistContext.commitTransaction(true); - } catch (PersistException e) { - } catch (SessionException e) { - } catch(Throwable e) { - // Rethrow the exception to Axis: - // Check if the exception is an AxisFault or a RuntimeException - // enveloped AxisFault and if so, pass it on as such. Otherwise - // log to help debugging and throw as is. - if (e.getCause() != null && e.getCause() instanceof AxisFault) - throw e.getCause(); - else if (e.getCause() != null && e.getCause().getCause() != null - && e.getCause().getCause() instanceof AxisFault) - throw e.getCause().getCause(); - else { - logger.warn("Unhandled exception " + e.getMessage(), e); - throw e; - } - } finally { - PersistContext.closeSession(); - PersistContext.closeSession(true); - } - return result; - } - }); - } - - @SuppressWarnings("unchecked") - public T getServiceImpl(Class serviceInterface) { - return getProxy(serviceInterface, (T)serviceMap.get(serviceInterface)); - } -} diff --git a/awsapi/src/com/cloud/bridge/service/ServletAction.java b/awsapi/src/com/cloud/bridge/service/ServletAction.java deleted file mode 100644 index 5355dca4ffd..00000000000 --- a/awsapi/src/com/cloud/bridge/service/ServletAction.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.cloud.bridge.service; - -import java.io.IOException; - -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; -import javax.xml.stream.XMLStreamException; - -/** - * @author Kelven Yang - */ -public interface ServletAction { - void execute(HttpServletRequest request, HttpServletResponse response) throws IOException, XMLStreamException; -} diff --git a/awsapi/src/com/cloud/bridge/service/controller/s3/S3BucketAction.java b/awsapi/src/com/cloud/bridge/service/controller/s3/S3BucketAction.java index c614509c3cb..0504a533178 100644 --- a/awsapi/src/com/cloud/bridge/service/controller/s3/S3BucketAction.java +++ b/awsapi/src/com/cloud/bridge/service/controller/s3/S3BucketAction.java @@ -53,6 +53,7 @@ import com.cloud.bridge.persist.dao.SAclDao; import com.cloud.bridge.persist.dao.SBucketDao; import com.cloud.bridge.service.S3Constants; import com.cloud.bridge.service.S3RestServlet; +import com.cloud.bridge.service.controller.s3.ServiceProvider; import com.cloud.bridge.service.UserContext; import com.cloud.bridge.service.core.s3.S3AccessControlList; import com.cloud.bridge.service.core.s3.S3AccessControlPolicy; diff --git a/awsapi/src/com/cloud/bridge/service/controller/s3/ServiceProvider.java b/awsapi/src/com/cloud/bridge/service/controller/s3/ServiceProvider.java index 4ddc2678561..c00f2a202a8 100644 --- a/awsapi/src/com/cloud/bridge/service/controller/s3/ServiceProvider.java +++ b/awsapi/src/com/cloud/bridge/service/controller/s3/ServiceProvider.java @@ -1,358 +1,358 @@ -/** - * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved - * - * This software is licensed under the GNU General Public License v3 or later. - * - * It is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or any later version. 
- * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - */ -package com.cloud.bridge.service.controller.s3; - - -import java.io.File; -import java.io.FileInputStream; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.lang.reflect.InvocationHandler; -import java.lang.reflect.Method; -import java.lang.reflect.Proxy; -import java.net.InetAddress; -import java.sql.SQLException; -import java.util.HashMap; -import java.util.Map; -import java.util.Properties; -import java.util.Timer; -import java.util.TimerTask; - -import org.apache.axis2.AxisFault; -import org.apache.log4j.Logger; -import org.apache.log4j.xml.DOMConfigurator; -import org.hibernate.SessionException; - -import com.amazon.s3.AmazonS3SkeletonInterface; -import com.amazon.ec2.AmazonEC2SkeletonInterface; -import com.cloud.bridge.model.MHost; -import com.cloud.bridge.model.SHost; -import com.cloud.bridge.model.UserCredentials; -import com.cloud.bridge.persist.PersistContext; -import com.cloud.bridge.persist.PersistException; -import com.cloud.bridge.persist.dao.MHostDao; -import com.cloud.bridge.persist.dao.SHostDao; -import com.cloud.bridge.persist.dao.UserCredentialsDao; -import com.cloud.bridge.service.EC2SoapServiceImpl; -import com.cloud.bridge.service.UserInfo; -import com.cloud.bridge.service.core.ec2.EC2Engine; -import com.cloud.bridge.service.core.s3.S3BucketPolicy; -import com.cloud.bridge.service.core.s3.S3Engine; -import com.cloud.bridge.service.exception.ConfigurationException; -import com.cloud.bridge.util.ConfigurationHelper; -import com.cloud.bridge.util.DateHelper; -import com.cloud.bridge.util.NetHelper; -import com.cloud.bridge.util.OrderedPair; - -/** 
- * @author Kelven Yang - */ -public class ServiceProvider { - protected final static Logger logger = Logger.getLogger(ServiceProvider.class); - - public final static long HEARTBEAT_INTERVAL = 10000; - - private static ServiceProvider instance; - - private Map, Object> serviceMap = new HashMap, Object>(); - private Timer timer = new Timer(); - private MHost mhost; - private Properties properties; - private boolean useSubDomain = false; // use DNS sub domain for bucket name - private String serviceEndpoint = null; - private String multipartDir = null; // illegal bucket name used as a folder for storing multiparts - private String masterDomain = ".s3.amazonaws.com"; - private S3Engine engine; - private EC2Engine EC2_engine = null; - - // -> cache Bucket Policies here so we don't have to load from db on every access - private Map policyMap = new HashMap(); - - protected ServiceProvider() throws IOException { - // register service implementation object - engine = new S3Engine(); - EC2_engine = new EC2Engine(); - serviceMap.put(AmazonS3SkeletonInterface.class, new S3SerializableServiceImplementation(engine)); - serviceMap.put(AmazonEC2SkeletonInterface.class, new EC2SoapServiceImpl(EC2_engine)); - } - - public synchronized static ServiceProvider getInstance() { - if(instance == null) - { - try { - instance = new ServiceProvider(); - instance.initialize(); - PersistContext.commitTransaction(); - } catch(Throwable e) { - logger.error("Unexpected exception " + e.getMessage(), e); - } finally { - PersistContext.closeSession(); - } - } - return instance; - } - - public long getManagementHostId() { - // we want to limit mhost within its own session, id of the value will be returned - long mhostId = 0; - if(mhost != null) - mhostId = mhost.getId() != null ? 
mhost.getId().longValue() : 0L; - return mhostId; - } - - /** - * We return a 2-tuple to distinguish between two cases: - * (1) there is no entry in the map for bucketName, and (2) there is a null entry - * in the map for bucketName. In case 2, the database was inspected for the - * bucket policy but it had none so we cache it here to reduce database lookups. - * @param bucketName - * @return Integer in the tuple means: -1 if no policy defined for the bucket, 0 if one defined - * even if it is set at null. - */ - public OrderedPair getBucketPolicy(String bucketName) { - - if (policyMap.containsKey( bucketName )) { - S3BucketPolicy policy = policyMap.get( bucketName ); - return new OrderedPair( policy, 0 ); - } - else return new OrderedPair( null, -1 ); // For case (1) where the map has no entry for bucketName - } - - /** - * The policy parameter can be set to null, which means that there is no policy - * for the bucket so a database lookup is not necessary. - * - * @param bucketName - * @param policy - */ - public void setBucketPolicy(String bucketName, S3BucketPolicy policy) { - policyMap.put(bucketName, policy); - } - - public void deleteBucketPolicy(String bucketName) { - policyMap.remove(bucketName); - } - - public S3Engine getS3Engine() { - return engine; - } - - public EC2Engine getEC2Engine() { - return EC2_engine; - } - - public String getMasterDomain() { - return masterDomain; - } - - public boolean getUseSubDomain() { - return useSubDomain; - } - - public String getServiceEndpoint() { - return serviceEndpoint; - } - - public String getMultipartDir() { - return multipartDir; - } - - public Properties getStartupProperties() { - return properties; - } - - public UserInfo getUserInfo(String accessKey) - throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException { - UserInfo info = new UserInfo(); - - UserCredentialsDao credentialDao = new UserCredentialsDao(); - UserCredentials cloudKeys = credentialDao.getByAccessKey( 
accessKey ); - if ( null == cloudKeys ) { - logger.debug( accessKey + " is not defined in the S3 service - call SetUserKeys" ); - return null; - } else { - info.setAccessKey( accessKey ); - info.setSecretKey( cloudKeys.getSecretKey()); - info.setCanonicalUserId(accessKey); - info.setDescription( "S3 REST request" ); - return info; - } - } - - protected void initialize() { - if(logger.isInfoEnabled()) - logger.info("Initializing ServiceProvider..."); - - File file = ConfigurationHelper.findConfigurationFile("log4j-cloud-bridge.xml"); - if(file != null) { - System.out.println("Log4j configuration from : " + file.getAbsolutePath()); - DOMConfigurator.configureAndWatch(file.getAbsolutePath(), 10000); - } else { - System.out.println("Configure log4j with default properties"); - } - - loadStartupProperties(); - String hostKey = properties.getProperty("host.key"); - if(hostKey == null) { - InetAddress inetAddr = NetHelper.getFirstNonLoopbackLocalInetAddress(); - if(inetAddr != null) - hostKey = NetHelper.getMacAddress(inetAddr); - } - if(hostKey == null) - throw new ConfigurationException("Please configure host.key property in cloud-bridge.properites"); - String host = properties.getProperty("host"); - if(host == null) - host = NetHelper.getHostName(); - - if(properties.get("bucket.dns") != null && - ((String)properties.get("bucket.dns")).equalsIgnoreCase("true")) { - useSubDomain = true; - } - - serviceEndpoint = (String)properties.get("serviceEndpoint"); - masterDomain = new String( "." 
+ serviceEndpoint ); - - setupHost(hostKey, host); - - // we will commit and start a new transaction to allow host info be flushed to DB - PersistContext.flush(); - - String localStorageRoot = properties.getProperty("storage.root"); - if (localStorageRoot != null) setupLocalStorage(localStorageRoot); - - multipartDir = properties.getProperty("storage.multipartDir"); - - timer.schedule(getHeartbeatTask(), HEARTBEAT_INTERVAL, HEARTBEAT_INTERVAL); - - if(logger.isInfoEnabled()) - logger.info("ServiceProvider initialized"); - } - - private void loadStartupProperties() { - File propertiesFile = ConfigurationHelper.findConfigurationFile("cloud-bridge.properties"); - properties = new Properties(); - if(propertiesFile != null) { - try { - properties.load(new FileInputStream(propertiesFile)); - } catch (FileNotFoundException e) { - logger.warn("Unable to open properties file: " + propertiesFile.getAbsolutePath(), e); - } catch (IOException e) { - logger.warn("Unable to read properties file: " + propertiesFile.getAbsolutePath(), e); - } - - logger.info("Use startup properties file: " + propertiesFile.getAbsolutePath()); - } else { - if(logger.isInfoEnabled()) - logger.info("Startup properties is not found."); - } - } - - private TimerTask getHeartbeatTask() { - return new TimerTask() { - - @Override - public void run() { - try { - MHostDao mhostDao = new MHostDao(); - mhost.setLastHeartbeatTime(DateHelper.currentGMTTime()); - mhostDao.update(mhost); - PersistContext.commitTransaction(); - } catch(Throwable e){ - logger.error("Unexpected exception " + e.getMessage(), e); - } finally { - PersistContext.closeSession(); - } - } - }; - } - - private void setupHost(String hostKey, String host) { - MHostDao mhostDao = new MHostDao(); - mhost = mhostDao.getByHostKey(hostKey); - if(mhost == null) { - mhost = new MHost(); - mhost.setHostKey(hostKey); - mhost.setHost(host); - mhost.setLastHeartbeatTime(DateHelper.currentGMTTime()); - mhostDao.save(mhost); - } else { - 
mhost.setHost(host); - mhostDao.update(mhost); - } - } - - private void setupLocalStorage(String storageRoot) { - SHostDao shostDao = new SHostDao(); - SHost shost = shostDao.getLocalStorageHost(mhost.getId(), storageRoot); - if(shost == null) { - shost = new SHost(); - shost.setMhost(mhost); - mhost.getLocalSHosts().add(shost); - shost.setHostType(SHost.STORAGE_HOST_TYPE_LOCAL); - shost.setHost(NetHelper.getHostName()); - shost.setExportRoot(storageRoot); - PersistContext.getSession().save(shost); - } - } - - public void shutdown() { - timer.cancel(); - - if(logger.isInfoEnabled()) - logger.info("ServiceProvider stopped"); - } - - @SuppressWarnings("unchecked") - private static T getProxy(Class serviceInterface, final T serviceObject) { - return (T) Proxy.newProxyInstance(serviceObject.getClass().getClassLoader(), - new Class[] { serviceInterface }, - new InvocationHandler() { - public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { - Object result = null; - try { - result = method.invoke(serviceObject, args); - PersistContext.commitTransaction(); - } catch (PersistException e) { - } catch (SessionException e) { - } catch(Throwable e) { - // Rethrow the exception to Axis: - // Check if the exception is an AxisFault or a RuntimeException - // enveloped AxisFault and if so, pass it on as such. Otherwise - // log to help debugging and throw as is. 
- if (e.getCause() != null && e.getCause() instanceof AxisFault) - throw e.getCause(); - else if (e.getCause() != null && e.getCause().getCause() != null - && e.getCause().getCause() instanceof AxisFault) - throw e.getCause().getCause(); - else { - logger.warn("Unhandled exception " + e.getMessage(), e); - throw e; - } - } finally { - PersistContext.closeSession(); - } - return result; - } - }); - } - - @SuppressWarnings("unchecked") - public T getServiceImpl(Class serviceInterface) { - return getProxy(serviceInterface, (T)serviceMap.get(serviceInterface)); - } -} +/** + * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved + * + * This software is licensed under the GNU General Public License v3 or later. + * + * It is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + */ +package com.cloud.bridge.service.controller.s3; + + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.Method; +import java.lang.reflect.Proxy; +import java.net.InetAddress; +import java.sql.SQLException; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; +import java.util.Timer; +import java.util.TimerTask; + +import org.apache.axis2.AxisFault; +import org.apache.log4j.Logger; +import org.apache.log4j.xml.DOMConfigurator; +import org.hibernate.SessionException; + +import com.amazon.s3.AmazonS3SkeletonInterface; +import com.amazon.ec2.AmazonEC2SkeletonInterface; +import com.cloud.bridge.model.MHost; +import com.cloud.bridge.model.SHost; +import com.cloud.bridge.model.UserCredentials; +import com.cloud.bridge.persist.PersistContext; +import com.cloud.bridge.persist.PersistException; +import com.cloud.bridge.persist.dao.MHostDao; +import com.cloud.bridge.persist.dao.SHostDao; +import com.cloud.bridge.persist.dao.UserCredentialsDao; +import com.cloud.bridge.service.EC2SoapServiceImpl; +import com.cloud.bridge.service.UserInfo; +import com.cloud.bridge.service.core.ec2.EC2Engine; +import com.cloud.bridge.service.core.s3.S3BucketPolicy; +import com.cloud.bridge.service.core.s3.S3Engine; +import com.cloud.bridge.service.exception.ConfigurationException; +import com.cloud.bridge.util.ConfigurationHelper; +import com.cloud.bridge.util.DateHelper; +import com.cloud.bridge.util.NetHelper; +import com.cloud.bridge.util.OrderedPair; + +/** + * @author Kelven Yang + */ +public class ServiceProvider { + protected final static Logger logger = Logger.getLogger(ServiceProvider.class); + + public final static long HEARTBEAT_INTERVAL = 10000; + + private static ServiceProvider instance; + + private Map, Object> serviceMap = new HashMap, Object>(); + private Timer timer = new Timer(); + private MHost 
mhost; + private Properties properties; + private boolean useSubDomain = false; // use DNS sub domain for bucket name + private String serviceEndpoint = null; + private String multipartDir = null; // illegal bucket name used as a folder for storing multiparts + private String masterDomain = ".s3.amazonaws.com"; + private S3Engine engine; + private EC2Engine EC2_engine = null; + + // -> cache Bucket Policies here so we don't have to load from db on every access + private Map policyMap = new HashMap(); + + protected ServiceProvider() throws IOException { + // register service implementation object + engine = new S3Engine(); + EC2_engine = new EC2Engine(); + serviceMap.put(AmazonS3SkeletonInterface.class, new S3SerializableServiceImplementation(engine)); + serviceMap.put(AmazonEC2SkeletonInterface.class, new EC2SoapServiceImpl(EC2_engine)); + } + + public synchronized static ServiceProvider getInstance() { + if(instance == null) + { + try { + instance = new ServiceProvider(); + instance.initialize(); + PersistContext.commitTransaction(); + } catch(Throwable e) { + logger.error("Unexpected exception " + e.getMessage(), e); + } finally { + PersistContext.closeSession(); + } + } + return instance; + } + + public long getManagementHostId() { + // we want to limit mhost within its own session, id of the value will be returned + long mhostId = 0; + if(mhost != null) + mhostId = mhost.getId() != null ? mhost.getId().longValue() : 0L; + return mhostId; + } + + /** + * We return a 2-tuple to distinguish between two cases: + * (1) there is no entry in the map for bucketName, and (2) there is a null entry + * in the map for bucketName. In case 2, the database was inspected for the + * bucket policy but it had none so we cache it here to reduce database lookups. + * @param bucketName + * @return Integer in the tuple means: -1 if no policy defined for the bucket, 0 if one defined + * even if it is set at null. 
+ */ + public OrderedPair getBucketPolicy(String bucketName) { + + if (policyMap.containsKey( bucketName )) { + S3BucketPolicy policy = policyMap.get( bucketName ); + return new OrderedPair( policy, 0 ); + } + else return new OrderedPair( null, -1 ); // For case (1) where the map has no entry for bucketName + } + + /** + * The policy parameter can be set to null, which means that there is no policy + * for the bucket so a database lookup is not necessary. + * + * @param bucketName + * @param policy + */ + public void setBucketPolicy(String bucketName, S3BucketPolicy policy) { + policyMap.put(bucketName, policy); + } + + public void deleteBucketPolicy(String bucketName) { + policyMap.remove(bucketName); + } + + public S3Engine getS3Engine() { + return engine; + } + + public EC2Engine getEC2Engine() { + return EC2_engine; + } + + public String getMasterDomain() { + return masterDomain; + } + + public boolean getUseSubDomain() { + return useSubDomain; + } + + public String getServiceEndpoint() { + return serviceEndpoint; + } + + public String getMultipartDir() { + return multipartDir; + } + + public Properties getStartupProperties() { + return properties; + } + + public UserInfo getUserInfo(String accessKey) + throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException { + UserInfo info = new UserInfo(); + + UserCredentialsDao credentialDao = new UserCredentialsDao(); + UserCredentials cloudKeys = credentialDao.getByAccessKey( accessKey ); + if ( null == cloudKeys ) { + logger.debug( accessKey + " is not defined in the S3 service - call SetUserKeys" ); + return null; + } else { + info.setAccessKey( accessKey ); + info.setSecretKey( cloudKeys.getSecretKey()); + info.setCanonicalUserId(accessKey); + info.setDescription( "S3 REST request" ); + return info; + } + } + + protected void initialize() { + if(logger.isInfoEnabled()) + logger.info("Initializing ServiceProvider..."); + + File file = 
ConfigurationHelper.findConfigurationFile("log4j-cloud-bridge.xml"); + if(file != null) { + System.out.println("Log4j configuration from : " + file.getAbsolutePath()); + DOMConfigurator.configureAndWatch(file.getAbsolutePath(), 10000); + } else { + System.out.println("Configure log4j with default properties"); + } + + loadStartupProperties(); + String hostKey = properties.getProperty("host.key"); + if(hostKey == null) { + InetAddress inetAddr = NetHelper.getFirstNonLoopbackLocalInetAddress(); + if(inetAddr != null) + hostKey = NetHelper.getMacAddress(inetAddr); + } + if(hostKey == null) + throw new ConfigurationException("Please configure host.key property in cloud-bridge.properites"); + String host = properties.getProperty("host"); + if(host == null) + host = NetHelper.getHostName(); + + if(properties.get("bucket.dns") != null && + ((String)properties.get("bucket.dns")).equalsIgnoreCase("true")) { + useSubDomain = true; + } + + serviceEndpoint = (String)properties.get("serviceEndpoint"); + masterDomain = new String( "." 
+ serviceEndpoint ); + + setupHost(hostKey, host); + + // we will commit and start a new transaction to allow host info be flushed to DB + PersistContext.flush(); + + String localStorageRoot = properties.getProperty("storage.root"); + if (localStorageRoot != null) setupLocalStorage(localStorageRoot); + + multipartDir = properties.getProperty("storage.multipartDir"); + + timer.schedule(getHeartbeatTask(), HEARTBEAT_INTERVAL, HEARTBEAT_INTERVAL); + + if(logger.isInfoEnabled()) + logger.info("ServiceProvider initialized"); + } + + private void loadStartupProperties() { + File propertiesFile = ConfigurationHelper.findConfigurationFile("cloud-bridge.properties"); + properties = new Properties(); + if(propertiesFile != null) { + try { + properties.load(new FileInputStream(propertiesFile)); + } catch (FileNotFoundException e) { + logger.warn("Unable to open properties file: " + propertiesFile.getAbsolutePath(), e); + } catch (IOException e) { + logger.warn("Unable to read properties file: " + propertiesFile.getAbsolutePath(), e); + } + + logger.info("Use startup properties file: " + propertiesFile.getAbsolutePath()); + } else { + if(logger.isInfoEnabled()) + logger.info("Startup properties is not found."); + } + } + + private TimerTask getHeartbeatTask() { + return new TimerTask() { + + @Override + public void run() { + try { + MHostDao mhostDao = new MHostDao(); + mhost.setLastHeartbeatTime(DateHelper.currentGMTTime()); + mhostDao.update(mhost); + PersistContext.commitTransaction(); + } catch(Throwable e){ + logger.error("Unexpected exception " + e.getMessage(), e); + } finally { + PersistContext.closeSession(); + } + } + }; + } + + private void setupHost(String hostKey, String host) { + MHostDao mhostDao = new MHostDao(); + mhost = mhostDao.getByHostKey(hostKey); + if(mhost == null) { + mhost = new MHost(); + mhost.setHostKey(hostKey); + mhost.setHost(host); + mhost.setLastHeartbeatTime(DateHelper.currentGMTTime()); + mhostDao.save(mhost); + } else { + 
mhost.setHost(host); + mhostDao.update(mhost); + } + } + + private void setupLocalStorage(String storageRoot) { + SHostDao shostDao = new SHostDao(); + SHost shost = shostDao.getLocalStorageHost(mhost.getId(), storageRoot); + if(shost == null) { + shost = new SHost(); + shost.setMhost(mhost); + mhost.getLocalSHosts().add(shost); + shost.setHostType(SHost.STORAGE_HOST_TYPE_LOCAL); + shost.setHost(NetHelper.getHostName()); + shost.setExportRoot(storageRoot); + PersistContext.getSession().save(shost); + } + } + + public void shutdown() { + timer.cancel(); + + if(logger.isInfoEnabled()) + logger.info("ServiceProvider stopped"); + } + + @SuppressWarnings("unchecked") + private static T getProxy(Class serviceInterface, final T serviceObject) { + return (T) Proxy.newProxyInstance(serviceObject.getClass().getClassLoader(), + new Class[] { serviceInterface }, + new InvocationHandler() { + public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { + Object result = null; + try { + result = method.invoke(serviceObject, args); + PersistContext.commitTransaction(); + } catch (PersistException e) { + } catch (SessionException e) { + } catch(Throwable e) { + // Rethrow the exception to Axis: + // Check if the exception is an AxisFault or a RuntimeException + // enveloped AxisFault and if so, pass it on as such. Otherwise + // log to help debugging and throw as is. 
+ if (e.getCause() != null && e.getCause() instanceof AxisFault) + throw e.getCause(); + else if (e.getCause() != null && e.getCause().getCause() != null + && e.getCause().getCause() instanceof AxisFault) + throw e.getCause().getCause(); + else { + logger.warn("Unhandled exception " + e.getMessage(), e); + throw e; + } + } finally { + PersistContext.closeSession(); + } + return result; + } + }); + } + + @SuppressWarnings("unchecked") + public T getServiceImpl(Class serviceInterface) { + return getProxy(serviceInterface, (T)serviceMap.get(serviceInterface)); + } +} diff --git a/awsapi/src/com/cloud/bridge/service/core/s3/S3Engine.java b/awsapi/src/com/cloud/bridge/service/core/s3/S3Engine.java index 3846249c604..8b41a0a77a5 100644 --- a/awsapi/src/com/cloud/bridge/service/core/s3/S3Engine.java +++ b/awsapi/src/com/cloud/bridge/service/core/s3/S3Engine.java @@ -1,1873 +1,1873 @@ -/* - * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.cloud.bridge.service.core.s3; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Calendar; -import java.util.Date; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.ListIterator; -import java.util.Map; -import java.util.Random; -import java.util.Set; -import java.util.TimeZone; -import java.util.UUID; - -import javax.servlet.http.HttpServletResponse; - -import org.apache.log4j.Logger; -import org.hibernate.LockMode; -import org.hibernate.Session; -import org.json.simple.parser.ParseException; - -import com.cloud.bridge.io.S3FileSystemBucketAdapter; -import com.cloud.bridge.model.MHost; -import com.cloud.bridge.model.MHostMount; -import com.cloud.bridge.model.SAcl; -import com.cloud.bridge.model.SBucket; -import com.cloud.bridge.model.SHost; -import com.cloud.bridge.model.SMeta; -import com.cloud.bridge.model.SObject; -import com.cloud.bridge.model.SObjectItem; -import com.cloud.bridge.persist.PersistContext; -import com.cloud.bridge.persist.dao.BucketPolicyDao; -import com.cloud.bridge.persist.dao.MHostDao; -import com.cloud.bridge.persist.dao.MHostMountDao; -import com.cloud.bridge.persist.dao.MultipartLoadDao; -import com.cloud.bridge.persist.dao.SAclDao; -import com.cloud.bridge.persist.dao.SBucketDao; -import com.cloud.bridge.persist.dao.SHostDao; -import com.cloud.bridge.persist.dao.SMetaDao; -import com.cloud.bridge.persist.dao.SObjectDao; -import com.cloud.bridge.persist.dao.SObjectItemDao; -import com.cloud.bridge.service.S3Constants; -import com.cloud.bridge.service.UserContext; -import com.cloud.bridge.service.controller.s3.ServiceProvider; -import com.cloud.bridge.service.core.s3.S3BucketPolicy.PolicyAccess; -import com.cloud.bridge.service.core.s3.S3CopyObjectRequest.MetadataDirective; -import com.cloud.bridge.service.core.s3.S3PolicyAction.PolicyActions; -import 
com.cloud.bridge.service.core.s3.S3PolicyCondition.ConditionKeys; -import com.cloud.bridge.service.exception.HostNotMountedException; -import com.cloud.bridge.service.exception.InternalErrorException; -import com.cloud.bridge.service.exception.InvalidBucketName; -import com.cloud.bridge.service.exception.NoSuchObjectException; -import com.cloud.bridge.service.exception.ObjectAlreadyExistsException; -import com.cloud.bridge.service.exception.OutOfServiceException; -import com.cloud.bridge.service.exception.OutOfStorageException; -import com.cloud.bridge.service.exception.PermissionDeniedException; -import com.cloud.bridge.service.exception.UnsupportedException; -import com.cloud.bridge.util.DateHelper; -import com.cloud.bridge.util.PolicyParser; -import com.cloud.bridge.util.StringHelper; -import com.cloud.bridge.util.OrderedPair; -import com.cloud.bridge.util.Triple; - -/** - * @author Kelven Yang, John Zucker - * The CRUD control actions to be invoked from S3BucketAction or S3ObjectAction. - */ -public class S3Engine { - protected final static Logger logger = Logger.getLogger(S3Engine.class); - - private final int LOCK_ACQUIRING_TIMEOUT_SECONDS = 10; // ten seconds - - private final Map bucketAdapters = new HashMap(); - - public S3Engine() { - bucketAdapters.put(SHost.STORAGE_HOST_TYPE_LOCAL, new S3FileSystemBucketAdapter()); - } - - - /** - * Return a S3CopyObjectResponse which represents an object being copied from source - * to destination bucket. - * Called from S3ObjectAction when copying an object. - * This can be treated as first a GET followed by a PUT of the object the user wants to copy. 
- */ - - public S3CopyObjectResponse handleRequest(S3CopyObjectRequest request) - { - S3CopyObjectResponse response = new S3CopyObjectResponse(); - - // [A] Get the object we want to copy - S3GetObjectRequest getRequest = new S3GetObjectRequest(); - getRequest.setBucketName(request.getSourceBucketName()); - getRequest.setKey(request.getSourceKey()); - getRequest.setVersion(request.getVersion()); - getRequest.setConditions( request.getConditions()); - - getRequest.setInlineData( true ); - getRequest.setReturnData( true ); - if ( MetadataDirective.COPY == request.getDirective()) - getRequest.setReturnMetadata( true ); - else getRequest.setReturnMetadata( false ); - - //-> before we do anything verify the permissions on a copy basis - String destinationBucketName = request.getDestinationBucketName(); - String destinationKeyName = request.getDestinationKey(); - S3PolicyContext context = new S3PolicyContext( PolicyActions.PutObject, destinationBucketName ); - context.setKeyName( destinationKeyName ); - context.setEvalParam( ConditionKeys.MetaData, request.getDirective().toString()); - context.setEvalParam( ConditionKeys.CopySource, "/" + request.getSourceBucketName() + "/" + request.getSourceKey()); - if (PolicyAccess.DENY == verifyPolicy( context )) - throw new PermissionDeniedException( "Access Denied - bucket policy DENY result" ); - - S3GetObjectResponse originalObject = handleRequest(getRequest); - int resultCode = originalObject.getResultCode(); - if (200 != resultCode) { - response.setResultCode( resultCode ); - response.setResultDescription( originalObject.getResultDescription()); - return response; - } - - response.setCopyVersion( originalObject.getVersion()); - - - // [B] Put the object into the destination bucket - S3PutObjectInlineRequest putRequest = new S3PutObjectInlineRequest(); - putRequest.setBucketName(request.getDestinationBucketName()) ; - putRequest.setKey(destinationKeyName); - if ( MetadataDirective.COPY == request.getDirective()) - 
putRequest.setMetaEntries(originalObject.getMetaEntries()); - else putRequest.setMetaEntries(request.getMetaEntries()); - putRequest.setAcl(request.getAcl()); // -> if via a SOAP call - putRequest.setCannedAccess(request.getCannedAccess()); // -> if via a REST call - putRequest.setContentLength(originalObject.getContentLength()); - putRequest.setData(originalObject.getData()); - - S3PutObjectInlineResponse putResp = handleRequest(putRequest); - response.setResultCode( putResp.resultCode ); - response.setResultDescription( putResp.getResultDescription()); - response.setETag( putResp.getETag()); - response.setLastModified( putResp.getLastModified()); - response.setPutVersion( putResp.getVersion()); - return response; - } - - public S3CreateBucketResponse handleRequest(S3CreateBucketRequest request) - { - S3CreateBucketResponse response = new S3CreateBucketResponse(); - String cannedAccessPolicy = request.getCannedAccess(); - String bucketName = request.getBucketName(); - response.setBucketName( bucketName ); - - verifyBucketName( bucketName, false ); - - S3PolicyContext context = new S3PolicyContext( PolicyActions.CreateBucket, bucketName ); - context.setEvalParam( ConditionKeys.Acl, cannedAccessPolicy ); - if (PolicyAccess.DENY == verifyPolicy( context )) - throw new PermissionDeniedException( "Access Denied - bucket policy DENY result" ); - - if (PersistContext.acquireNamedLock("bucket.creation", LOCK_ACQUIRING_TIMEOUT_SECONDS)) - { - OrderedPair shost_storagelocation_pair = null; - boolean success = false; - try { - SBucketDao bucketDao = new SBucketDao(); - SAclDao aclDao = new SAclDao(); - - if (bucketDao.getByName(request.getBucketName()) != null) - throw new ObjectAlreadyExistsException("Bucket already exists"); - - shost_storagelocation_pair = allocBucketStorageHost(request.getBucketName(), null); - - SBucket sbucket = new SBucket(); - sbucket.setName(request.getBucketName()); - sbucket.setCreateTime(DateHelper.currentGMTTime()); - 
sbucket.setOwnerCanonicalId( UserContext.current().getCanonicalUserId()); - sbucket.setShost(shost_storagelocation_pair.getFirst()); - shost_storagelocation_pair.getFirst().getBuckets().add(sbucket); - bucketDao.save(sbucket); - - S3AccessControlList acl = request.getAcl(); - - if ( null != cannedAccessPolicy ) - setCannedAccessControls( cannedAccessPolicy, "SBucket", sbucket.getId(), sbucket ); - else if (null != acl) - aclDao.save( "SBucket", sbucket.getId(), acl ); - else setSingleAcl( "SBucket", sbucket.getId(), SAcl.PERMISSION_FULL ); - - // explicitly commit the transaction - PersistContext.commitTransaction(); - success = true; - } - finally - { - if(!success && shost_storagelocation_pair != null) { - S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(shost_storagelocation_pair.getFirst()); - bucketAdapter.deleteContainer(shost_storagelocation_pair.getSecond(), request.getBucketName()); - } - PersistContext.releaseNamedLock("bucket.creation"); - } - - } else { - throw new OutOfServiceException("Unable to acquire synchronization lock"); - } - - return response; - } - - /** - * Return a S3Response which represents the effect of an object being deleted from its bucket. - * Called from S3BucketAction when deleting an object. 
- */ - - public S3Response handleRequest( S3DeleteBucketRequest request ) - { - S3Response response = new S3Response(); - SBucketDao bucketDao = new SBucketDao(); - String bucketName = request.getBucketName(); - SBucket sbucket = bucketDao.getByName( bucketName ); - - if ( sbucket != null ) - { - S3PolicyContext context = new S3PolicyContext( PolicyActions.DeleteBucket, bucketName ); - switch( verifyPolicy( context )) - { - case ALLOW: - // The bucket policy can give users permission to delete a bucket whereas ACLs cannot - break; - - case DENY: - throw new PermissionDeniedException( "Access Denied - bucket policy DENY result" ); - - case DEFAULT_DENY: - default: - // Irrespective of what the ACLs say, only the owner can delete a bucket - String client = UserContext.current().getCanonicalUserId(); - if (!client.equals( sbucket.getOwnerCanonicalId())) { - throw new PermissionDeniedException( "Access Denied - only the owner can delete a bucket" ); - } - break; - } - - - // Delete the file from its storage location - OrderedPair host_storagelocation_pair = getBucketStorageHost(sbucket); - S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst()); - bucketAdapter.deleteContainer(host_storagelocation_pair.getSecond(), request.getBucketName()); - - // Cascade-deleting can delete related SObject/SObjectItem objects, but not SAcl, SMeta and policy objects. 
- // To delete SMeta & SAcl objects: - // (1)Get all the objects in the bucket, - // (2)then all the items in each object, - // (3) then all meta & acl data for each item - Set objectsInBucket = sbucket.getObjectsInBucket(); - Iterator it = objectsInBucket.iterator(); - while( it.hasNext()) - { - SObject oneObject = (SObject)it.next(); - Set itemsInObject = oneObject.getItems(); - Iterator is = itemsInObject.iterator(); - while( is.hasNext()) - { - SObjectItem oneItem = (SObjectItem)is.next(); - deleteMetaData( oneItem.getId()); - deleteObjectAcls( "SObjectItem", oneItem.getId()); - } - } - - // Delete all the policy state associated with the bucket - try { - ServiceProvider.getInstance().deleteBucketPolicy( bucketName ); - BucketPolicyDao policyDao = new BucketPolicyDao(); - policyDao.deletePolicy( bucketName ); - } - catch( Exception e ) { - logger.error("When deleting a bucket we must try to delete its policy: ", e); - } - - deleteBucketAcls( sbucket.getId()); - bucketDao.delete( sbucket ); - response.setResultCode(204); - response.setResultDescription("OK"); - } - else - { response.setResultCode(404); - response.setResultDescription("Bucket does not exist"); - } - return response; - } - - /** - * Return a S3ListBucketResponse which represents a list of up to 1000 objects contained ins the bucket. - * Called from S3BucketAction for GETting objects and for GETting object versions. 
- */ - - public S3ListBucketResponse listBucketContents(S3ListBucketRequest request, boolean includeVersions) - { - S3ListBucketResponse response = new S3ListBucketResponse(); - String bucketName = request.getBucketName(); - String prefix = request.getPrefix(); - if (prefix == null) prefix = StringHelper.EMPTY_STRING; - String marker = request.getMarker(); - if (marker == null) marker = StringHelper.EMPTY_STRING; - - String delimiter = request.getDelimiter(); - int maxKeys = request.getMaxKeys(); - if(maxKeys <= 0) maxKeys = 1000; - - SBucketDao bucketDao = new SBucketDao(); - SBucket sbucket = bucketDao.getByName(bucketName); - if (sbucket == null) throw new NoSuchObjectException("Bucket " + bucketName + " does not exist"); - - PolicyActions action = (includeVersions ? PolicyActions.ListBucketVersions : PolicyActions.ListBucket); - S3PolicyContext context = new S3PolicyContext( action, bucketName ); - context.setEvalParam( ConditionKeys.MaxKeys, new String( "" + maxKeys )); - context.setEvalParam( ConditionKeys.Prefix, prefix ); - context.setEvalParam( ConditionKeys.Delimiter, delimiter ); - verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_READ ); - - - // Wen execting the query, request one more item so that we know how to set isTruncated flag - SObjectDao sobjectDao = new SObjectDao(); - List l = null; - - if ( includeVersions ) - l = sobjectDao.listAllBucketObjects( sbucket, prefix, marker, maxKeys+1 ); - else l = sobjectDao.listBucketObjects( sbucket, prefix, marker, maxKeys+1 ); - - response.setBucketName(bucketName); - response.setMarker(marker); - response.setMaxKeys(maxKeys); - response.setPrefix(prefix); - response.setDelimiter(delimiter); - response.setTruncated(l.size() > maxKeys); - if(l.size() > maxKeys) { - response.setNextMarker(l.get(l.size() - 1).getNameKey()); - } - - // If needed - SOAP response does not support versioning - response.setContents( composeListBucketContentEntries(l, prefix, delimiter, maxKeys, includeVersions, 
request.getVersionIdMarker())); - response.setCommonPrefixes( composeListBucketPrefixEntries(l, prefix, delimiter, maxKeys)); - return response; - } - - /** - * Return a S3ListAllMyBucketResponse which represents a list of all buckets owned by the requester. - * Called from S3BucketAction for GETting all buckets. - * To check on bucket policies defined we have to (look for and) evaluate the policy on each - * bucket the user owns. - */ - public S3ListAllMyBucketsResponse handleRequest(S3ListAllMyBucketsRequest request) - { - S3ListAllMyBucketsResponse response = new S3ListAllMyBucketsResponse(); - SBucketDao bucketDao = new SBucketDao(); - - // "...you can only list buckets for which you are the owner." - List buckets = bucketDao.listBuckets(UserContext.current().getCanonicalUserId()); - S3CanonicalUser owner = new S3CanonicalUser(); - owner.setID(UserContext.current().getCanonicalUserId()); - owner.setDisplayName(""); - response.setOwner(owner); - - if (buckets != null) - { - S3ListAllMyBucketsEntry[] entries = new S3ListAllMyBucketsEntry[buckets.size()]; - int i = 0; - for(SBucket bucket : buckets) - { - String bucketName = bucket.getName(); - S3PolicyContext context = new S3PolicyContext( PolicyActions.ListAllMyBuckets, bucketName ); - verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_PASS ); - - entries[i] = new S3ListAllMyBucketsEntry(); - entries[i].setName(bucketName); - entries[i].setCreationDate(DateHelper.toCalendar(bucket.getCreateTime())); - i++; - } - response.setBuckets(entries); - } - return response; - } - - /** - * Return an S3Response representing the result of PUTTING the ACL of a given bucket. - * Called from S3BucketAction to PUT its ACL. 
- */ - - public S3Response handleRequest(S3SetBucketAccessControlPolicyRequest request) - { - S3Response response = new S3Response(); - SBucketDao bucketDao = new SBucketDao(); - String bucketName = request.getBucketName(); - SBucket sbucket = bucketDao.getByName(bucketName); - if(sbucket == null) { - response.setResultCode(404); - response.setResultDescription("Bucket does not exist"); - return response; - } - - S3PolicyContext context = new S3PolicyContext( PolicyActions.PutBucketAcl, bucketName ); - verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_WRITE_ACL ); - - SAclDao aclDao = new SAclDao(); - aclDao.save("SBucket", sbucket.getId(), request.getAcl()); - - response.setResultCode(200); - response.setResultDescription("OK"); - return response; - } - - - /** - * Return a S3AccessControlPolicy representing the ACL of a given bucket. - * Called from S3BucketAction to GET its ACL. - */ - - public S3AccessControlPolicy handleRequest(S3GetBucketAccessControlPolicyRequest request) - { - S3AccessControlPolicy policy = new S3AccessControlPolicy(); - SBucketDao bucketDao = new SBucketDao(); - String bucketName = request.getBucketName(); - SBucket sbucket = bucketDao.getByName( bucketName ); - if (sbucket == null) - throw new NoSuchObjectException("Bucket " + bucketName + " does not exist"); - - S3CanonicalUser owner = new S3CanonicalUser(); - owner.setID(sbucket.getOwnerCanonicalId()); - owner.setDisplayName(""); - policy.setOwner(owner); - - S3PolicyContext context = new S3PolicyContext( PolicyActions.GetBucketAcl, bucketName ); - verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_READ_ACL ); - - SAclDao aclDao = new SAclDao(); - List grants = aclDao.listGrants("SBucket", sbucket.getId()); - policy.setGrants(S3Grant.toGrants(grants)); - return policy; - } - - /** - * This method should be called if a multipart upload is aborted OR has completed successfully and - * the individual parts have to be cleaned up. 
- * Called from S3ObjectAction when executing at completion or when aborting multipart upload. - * @param bucketName - * @param uploadId - * @param verifyPermission - If false then do not check the user's permission to clean up the state - */ - public int freeUploadParts(String bucketName, int uploadId, boolean verifyPermission) - { - // -> we need to look up the final bucket to figure out which mount point to use to save the part in - SBucketDao bucketDao = new SBucketDao(); - SBucket bucket = bucketDao.getByName(bucketName); - if (bucket == null) { - logger.error( "initiateMultipartUpload failed since " + bucketName + " does not exist" ); - return 404; - } - - OrderedPair host_storagelocation_pair = getBucketStorageHost(bucket); - S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst()); - - try { - MultipartLoadDao uploadDao = new MultipartLoadDao(); - OrderedPair exists = uploadDao.multipartExits( uploadId ); - if (null == exists) { - logger.error( "initiateMultipartUpload failed since multipart upload" + uploadId + " does not exist" ); - return 404; - } - - // -> the multipart initiator or bucket owner can do this action by default - if (verifyPermission) - { - String initiator = uploadDao.getInitiator( uploadId ); - if (null == initiator || !initiator.equals( UserContext.current().getAccessKey())) - { - // -> write permission on a bucket allows a PutObject / DeleteObject action on any object in the bucket - S3PolicyContext context = new S3PolicyContext( PolicyActions.AbortMultipartUpload, bucketName ); - context.setKeyName( exists.getSecond()); - verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE ); - } - } - - // -> first get a list of all the uploaded files and delete one by one - S3MultipartPart[] parts = uploadDao.getParts( uploadId, 10000, 0 ); - for( int i=0; i < parts.length; i++ ) - { - bucketAdapter.deleteObject( host_storagelocation_pair.getSecond(), 
ServiceProvider.getInstance().getMultipartDir(), parts[i].getPath()); - } - - uploadDao.deleteUpload( uploadId ); - return 204; - - } - catch( PermissionDeniedException e ) { - logger.error("freeUploadParts failed due to [" + e.getMessage() + "]", e); - throw e; - } - catch (Exception e) { - logger.error("freeUploadParts failed due to [" + e.getMessage() + "]", e); - return 500; - } - } - - /** - * The initiator must have permission to write to the bucket in question in order to initiate - * a multipart upload. Also check to make sure the special folder used to store parts of - * a multipart exists for this bucket. - * Called from S3ObjectAction during many stages of multipart upload. - */ - public S3PutObjectInlineResponse initiateMultipartUpload(S3PutObjectInlineRequest request) - { - S3PutObjectInlineResponse response = new S3PutObjectInlineResponse(); - String bucketName = request.getBucketName(); - String nameKey = request.getKey(); - - // -> does the bucket exist and can we write to it? 
- SBucketDao bucketDao = new SBucketDao(); - SBucket bucket = bucketDao.getByName(bucketName); - if (bucket == null) { - logger.error( "initiateMultipartUpload failed since " + bucketName + " does not exist" ); - response.setResultCode(404); - } - - S3PolicyContext context = new S3PolicyContext( PolicyActions.PutObject, bucketName ); - context.setKeyName( nameKey ); - context.setEvalParam( ConditionKeys.Acl, request.getCannedAccess()); - verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE ); - - createUploadFolder( bucketName ); - - try { - MultipartLoadDao uploadDao = new MultipartLoadDao(); - int uploadId = uploadDao.initiateUpload( UserContext.current().getAccessKey(), bucketName, nameKey, request.getCannedAccess(), request.getMetaEntries()); - response.setUploadId( uploadId ); - response.setResultCode(200); - - } catch( Exception e ) { - logger.error("initiateMultipartUpload exception: ", e); - response.setResultCode(500); - } - - return response; - } - - /** - * Save the object fragment in a special (i.e., hidden) directory inside the same mount point as - * the bucket location that the final object will be stored in. - * Called from S3ObjectAction during many stages of multipart upload. 
- * @param request - * @param uploadId - * @param partNumber - * @return S3PutObjectInlineResponse - */ - public S3PutObjectInlineResponse saveUploadPart(S3PutObjectInlineRequest request, int uploadId, int partNumber) - { - S3PutObjectInlineResponse response = new S3PutObjectInlineResponse(); - String bucketName = request.getBucketName(); - - // -> we need to look up the final bucket to figure out which mount point to use to save the part in - SBucketDao bucketDao = new SBucketDao(); - SBucket bucket = bucketDao.getByName(bucketName); - if (bucket == null) { - logger.error( "saveUploadedPart failed since " + bucketName + " does not exist" ); - response.setResultCode(404); - } - S3PolicyContext context = new S3PolicyContext( PolicyActions.PutObject, bucketName ); - context.setKeyName( request.getKey()); - verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE ); - - OrderedPair host_storagelocation_pair = getBucketStorageHost(bucket); - S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst()); - String itemFileName = new String( uploadId + "-" + partNumber ); - InputStream is = null; - - try { - is = request.getDataInputStream(); - String md5Checksum = bucketAdapter.saveObject(is, host_storagelocation_pair.getSecond(), ServiceProvider.getInstance().getMultipartDir(), itemFileName); - response.setETag(md5Checksum); - - MultipartLoadDao uploadDao = new MultipartLoadDao(); - uploadDao.savePart( uploadId, partNumber, md5Checksum, itemFileName, (int)request.getContentLength()); - response.setResultCode(200); - - } catch (IOException e) { - logger.error("UploadPart failed due to " + e.getMessage(), e); - response.setResultCode(500); - } catch (OutOfStorageException e) { - logger.error("UploadPart failed due to " + e.getMessage(), e); - response.setResultCode(500); - } catch (Exception e) { - logger.error("UploadPart failed due to " + e.getMessage(), e); - response.setResultCode(500); - } finally { - if(is != null) 
{ - try { - is.close(); - } catch (IOException e) { - logger.error("UploadPart unable to close stream from data handler.", e); - } - } - } - - return response; - } - - /** - * Create the real object represented by all the parts of the multipart upload. - * Called from S3ObjectAction at completion of multipart upload. - * @param httpResp - Servlet response handle to return the headers of the response (including version header) - * @param request - Normal parameters needed to create a new object (including metadata) - * @param parts - List of files that make up the multipart - * @param outputStream - Response output stream - * N.B. - This method can be long-lasting - * We are required to keep the connection alive by returning whitespace characters back periodically. - */ - - public S3PutObjectInlineResponse concatentateMultipartUploads(HttpServletResponse httpResp, S3PutObjectInlineRequest request, S3MultipartPart[] parts, OutputStream outputStream) throws IOException - { - // [A] Set up and initial error checking - S3PutObjectInlineResponse response = new S3PutObjectInlineResponse(); - String bucketName = request.getBucketName(); - String key = request.getKey(); - S3MetaDataEntry[] meta = request.getMetaEntries(); - - SBucketDao bucketDao = new SBucketDao(); - SBucket bucket = bucketDao.getByName(bucketName); - if (bucket == null) { - logger.error( "completeMultipartUpload( failed since " + bucketName + " does not exist" ); - response.setResultCode(404); - } - - // [B] Now we need to create the final re-assembled object - // -> the allocObjectItem checks for the bucket policy PutObject permissions - OrderedPair object_objectitem_pair = allocObjectItem(bucket, key, meta, null, request.getCannedAccess()); - OrderedPair host_storagelocation_pair = getBucketStorageHost(bucket); - - S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst()); - String itemFileName = object_objectitem_pair.getSecond().getStoredPath(); - - // -> Amazon 
defines that we must return a 200 response immediately to the client, but - // -> we don't know the version header until we hit here - httpResp.setStatus(200); - httpResp.setContentType("text/xml; charset=UTF-8"); - String version = object_objectitem_pair.getSecond().getVersion(); - if (null != version) httpResp.addHeader( "x-amz-version-id", version ); - httpResp.flushBuffer(); - - - // [C] Re-assemble the object from its uploaded file parts - try { - // explicit transaction control to avoid holding transaction during long file concatenation process - PersistContext.commitTransaction(); - - OrderedPair result = bucketAdapter. - concatentateObjects - ( host_storagelocation_pair.getSecond(), - bucket.getName(), - itemFileName, - ServiceProvider.getInstance().getMultipartDir(), - parts, - outputStream ); - response.setETag(result.getFirst()); - response.setLastModified(DateHelper.toCalendar( object_objectitem_pair.getSecond().getLastModifiedTime())); - - SObjectItemDao itemDao = new SObjectItemDao(); - SObjectItem item = itemDao.get( object_objectitem_pair.getSecond().getId()); - item.setMd5(result.getFirst()); - item.setStoredSize(result.getSecond().longValue()); - response.setResultCode(200); - - PersistContext.getSession().save(item); - } - catch (Exception e) { - logger.error("completeMultipartUpload failed due to " + e.getMessage(), e); - } - return response; - } - - /** - * Return a S3PutObjectInlineResponse which represents an object being created into a bucket - * Called from S3ObjectAction when PUTting or POTing an object. 
- */ - - public S3PutObjectInlineResponse handleRequest(S3PutObjectInlineRequest request) - { - S3PutObjectInlineResponse response = new S3PutObjectInlineResponse(); - String bucketName = request.getBucketName(); - String key = request.getKey(); - long contentLength = request.getContentLength(); - S3MetaDataEntry[] meta = request.getMetaEntries(); - S3AccessControlList acl = request.getAcl(); - - SBucketDao bucketDao = new SBucketDao(); - SBucket bucket = bucketDao.getByName(bucketName); - if (bucket == null) throw new NoSuchObjectException("Bucket " + bucketName + " does not exist"); - - - // Is the caller allowed to write the object? - // The allocObjectItem checks for the bucket policy PutObject permissions - OrderedPair object_objectitem_pair = allocObjectItem(bucket, key, meta, acl, request.getCannedAccess()); - OrderedPair host_storagelocation_pair = getBucketStorageHost(bucket); - - S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst()); - String itemFileName = object_objectitem_pair.getSecond().getStoredPath(); - InputStream is = null; - - try { - // explicit transaction control to avoid holding transaction during file-copy process - PersistContext.commitTransaction(); - - is = request.getDataInputStream(); - String md5Checksum = bucketAdapter.saveObject(is, host_storagelocation_pair.getSecond(), bucket.getName(), itemFileName); - response.setETag(md5Checksum); - response.setLastModified(DateHelper.toCalendar( object_objectitem_pair.getSecond().getLastModifiedTime())); - response.setVersion( object_objectitem_pair.getSecond().getVersion()); - - SObjectItemDao itemDao = new SObjectItemDao(); - SObjectItem item = itemDao.get( object_objectitem_pair.getSecond().getId()); - item.setMd5(md5Checksum); - item.setStoredSize(contentLength); - PersistContext.getSession().save(item); - - } catch (IOException e) { - logger.error("PutObjectInline failed due to " + e.getMessage(), e); - } catch (OutOfStorageException e) { - 
logger.error("PutObjectInline failed due to " + e.getMessage(), e); - } finally { - if(is != null) { - try { - is.close(); - } catch (IOException e) { - logger.error("PutObjectInline unable to close stream from data handler.", e); - } - } - } - - return response; - } - - /** - * Return a S3PutObjectResponse which represents an object being created into a bucket - * Called from S3RestServlet when processing a DIME request. - */ - - public S3PutObjectResponse handleRequest(S3PutObjectRequest request) - { - S3PutObjectResponse response = new S3PutObjectResponse(); - String bucketName = request.getBucketName(); - String key = request.getKey(); - long contentLength = request.getContentLength(); - S3MetaDataEntry[] meta = request.getMetaEntries(); - S3AccessControlList acl = request.getAcl(); - - SBucketDao bucketDao = new SBucketDao(); - SBucket bucket = bucketDao.getByName(bucketName); - if(bucket == null) throw new NoSuchObjectException("Bucket " + bucketName + " does not exist"); - - // Is the caller allowed to write the object? 
- // The allocObjectItem checks for the bucket policy PutObject permissions - OrderedPair object_objectitem_pair = allocObjectItem(bucket, key, meta, acl, null); - OrderedPair host_storagelocation_pair = getBucketStorageHost(bucket); - - S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst()); - String itemFileName = object_objectitem_pair.getSecond().getStoredPath(); - InputStream is = null; - try { - // explicit transaction control to avoid holding transaction during file-copy process - PersistContext.commitTransaction(); - - is = request.getInputStream(); - String md5Checksum = bucketAdapter.saveObject(is, host_storagelocation_pair.getSecond(), bucket.getName(), itemFileName); - response.setETag(md5Checksum); - response.setLastModified(DateHelper.toCalendar( object_objectitem_pair.getSecond().getLastModifiedTime())); - - SObjectItemDao itemDao = new SObjectItemDao(); - SObjectItem item = itemDao.get( object_objectitem_pair.getSecond().getId()); - item.setMd5(md5Checksum); - item.setStoredSize(contentLength); - PersistContext.getSession().save(item); - - } catch (OutOfStorageException e) { - logger.error("PutObject failed due to " + e.getMessage(), e); - } finally { - if(is != null) { - try { - is.close(); - } catch (IOException e) { - logger.error("Unable to close stream from data handler.", e); - } - } - } - - return response; - } - - /** - * The ACL of an object is set at the object version level. By default, PUT sets the ACL of the latest - * version of an object. To set the ACL of a different version, using the versionId subresource. - * Called from S3ObjectAction to PUT an object's ACL. 
- */ - - public S3Response handleRequest(S3SetObjectAccessControlPolicyRequest request) - { - S3PolicyContext context = null; - - // [A] First find the object in the bucket - S3Response response = new S3Response(); - SBucketDao bucketDao = new SBucketDao(); - String bucketName = request.getBucketName(); - SBucket sbucket = bucketDao.getByName( bucketName ); - if(sbucket == null) { - response.setResultCode(404); - response.setResultDescription("Bucket " + bucketName + "does not exist"); - return response; - } - - SObjectDao sobjectDao = new SObjectDao(); - String nameKey = request.getKey(); - SObject sobject = sobjectDao.getByNameKey( sbucket, nameKey ); - if(sobject == null) { - response.setResultCode(404); - response.setResultDescription("Object " + request.getKey() + " in bucket " + bucketName + " does not exist"); - return response; - } - - String deletionMark = sobject.getDeletionMark(); - if (null != deletionMark) { - response.setResultCode(404); - response.setResultDescription("Object " + request.getKey() + " has been deleted (1)"); - return response; - } - - - // [B] Versioning allow the client to ask for a specific version not just the latest - SObjectItem item = null; - int versioningStatus = sbucket.getVersioningStatus(); - String wantVersion = request.getVersion(); - if ( SBucket.VERSIONING_ENABLED == versioningStatus && null != wantVersion) - item = sobject.getVersion( wantVersion ); - else item = sobject.getLatestVersion(( SBucket.VERSIONING_ENABLED != versioningStatus )); - - if (item == null) { - response.setResultCode(404); - response.setResultDescription("Object " + request.getKey() + " has been deleted (2)"); - return response; - } - - if ( SBucket.VERSIONING_ENABLED == versioningStatus ) { - context = new S3PolicyContext( PolicyActions.PutObjectAclVersion, bucketName ); - context.setEvalParam( ConditionKeys.VersionId, wantVersion ); - response.setVersion( item.getVersion()); - } - else context = new S3PolicyContext( PolicyActions.PutObjectAcl, 
bucketName ); - context.setKeyName( nameKey ); - verifyAccess( context, "SObjectItem", item.getId(), SAcl.PERMISSION_WRITE_ACL ); - - // -> the acl always goes on the instance of the object - SAclDao aclDao = new SAclDao(); - aclDao.save("SObjectItem", item.getId(), request.getAcl()); - - response.setResultCode(200); - response.setResultDescription("OK"); - return response; - } - - /** - * By default, GET returns ACL information about the latest version of an object. To return ACL - * information about a different version, use the versionId subresource - * Called from S3ObjectAction to get an object's ACL. - */ - - public S3AccessControlPolicy handleRequest(S3GetObjectAccessControlPolicyRequest request) - { - S3PolicyContext context = null; - - // [A] Does the object exist that holds the ACL we are looking for? - S3AccessControlPolicy policy = new S3AccessControlPolicy(); - SBucketDao bucketDao = new SBucketDao(); - String bucketName = request.getBucketName(); - SBucket sbucket = bucketDao.getByName( bucketName ); - if (sbucket == null) - throw new NoSuchObjectException("Bucket " + bucketName + " does not exist"); - - SObjectDao sobjectDao = new SObjectDao(); - String nameKey = request.getKey(); - SObject sobject = sobjectDao.getByNameKey( sbucket, nameKey ); - if (sobject == null) - throw new NoSuchObjectException("Object " + request.getKey() + " does not exist"); - - String deletionMark = sobject.getDeletionMark(); - if (null != deletionMark) { - policy.setResultCode(404); - policy.setResultDescription("Object " + request.getKey() + " has been deleted (1)"); - return policy; - } - - - // [B] Versioning allow the client to ask for a specific version not just the latest - SObjectItem item = null; - int versioningStatus = sbucket.getVersioningStatus(); - String wantVersion = request.getVersion(); - if ( SBucket.VERSIONING_ENABLED == versioningStatus && null != wantVersion) - item = sobject.getVersion( wantVersion ); - else item = sobject.getLatestVersion(( 
SBucket.VERSIONING_ENABLED != versioningStatus )); - - if (item == null) { - policy.setResultCode(404); - policy.setResultDescription("Object " + request.getKey() + " has been deleted (2)"); - return policy; - } - - if ( SBucket.VERSIONING_ENABLED == versioningStatus ) { - context = new S3PolicyContext( PolicyActions.GetObjectVersionAcl, bucketName ); - context.setEvalParam( ConditionKeys.VersionId, wantVersion ); - policy.setVersion( item.getVersion()); - } - else context = new S3PolicyContext( PolicyActions.GetObjectAcl, bucketName ); - context.setKeyName( nameKey ); - verifyAccess( context, "SObjectItem", item.getId(), SAcl.PERMISSION_READ_ACL ); - - - // [C] ACLs are ALWAYS on an instance of the object - S3CanonicalUser owner = new S3CanonicalUser(); - owner.setID(sobject.getOwnerCanonicalId()); - owner.setDisplayName(""); - policy.setOwner(owner); - policy.setResultCode(200); - - SAclDao aclDao = new SAclDao(); - List grants = aclDao.listGrants( "SObjectItem", item.getId()); - policy.setGrants(S3Grant.toGrants(grants)); - return policy; - } - - /** - * Handle requests for GET object and HEAD "get object extended" - * Called from S3ObjectAction for GET and HEAD of an object. 
- */ - - public S3GetObjectResponse handleRequest(S3GetObjectRequest request) - { - S3GetObjectResponse response = new S3GetObjectResponse(); - S3PolicyContext context = null; - boolean ifRange = false; - long bytesStart = request.getByteRangeStart(); - long bytesEnd = request.getByteRangeEnd(); - int resultCode = 200; - - // [A] Verify that the bucket and the object exist - SBucketDao bucketDao = new SBucketDao(); - String bucketName = request.getBucketName(); - SBucket sbucket = bucketDao.getByName(bucketName); - if (sbucket == null) { - response.setResultCode(404); - response.setResultDescription("Bucket " + request.getBucketName() + " does not exist"); - return response; - } - - SObjectDao objectDao = new SObjectDao(); - String nameKey = request.getKey(); - SObject sobject = objectDao.getByNameKey( sbucket, nameKey ); - if (sobject == null) { - response.setResultCode(404); - response.setResultDescription("Object " + request.getKey() + " does not exist in bucket " + request.getBucketName()); - return response; - } - - String deletionMark = sobject.getDeletionMark(); - if (null != deletionMark) { - response.setDeleteMarker( deletionMark ); - response.setResultCode(404); - response.setResultDescription("Object " + request.getKey() + " has been deleted (1)"); - return response; - } - - - // [B] Versioning allow the client to ask for a specific version not just the latest - SObjectItem item = null; - int versioningStatus = sbucket.getVersioningStatus(); - String wantVersion = request.getVersion(); - if ( SBucket.VERSIONING_ENABLED == versioningStatus && null != wantVersion) - item = sobject.getVersion( wantVersion ); - else item = sobject.getLatestVersion(( SBucket.VERSIONING_ENABLED != versioningStatus )); - - if (item == null) { - response.setResultCode(404); - response.setResultDescription("Object " + request.getKey() + " has been deleted (2)"); - return response; - } - - if ( SBucket.VERSIONING_ENABLED == versioningStatus ) { - context = new S3PolicyContext( 
PolicyActions.GetObjectVersion, bucketName ); - context.setEvalParam( ConditionKeys.VersionId, wantVersion ); - } - else context = new S3PolicyContext( PolicyActions.GetObject, bucketName ); - context.setKeyName( nameKey ); - verifyAccess( context, "SObjectItem", item.getId(), SAcl.PERMISSION_READ ); - - - // [C] Handle all the IFModifiedSince ... conditions, and access privileges - // -> http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.27 (HTTP If-Range header) - if (request.isReturnCompleteObjectOnConditionFailure() && (0 <= bytesStart && 0 <= bytesEnd)) ifRange = true; - - resultCode = conditionPassed( request.getConditions(), item.getLastModifiedTime(), item.getMd5(), ifRange ); - if ( -1 == resultCode ) { - // -> If-Range implementation, we have to return the entire object - resultCode = 200; - bytesStart = -1; - bytesEnd = -1; - } - else if (200 != resultCode) { - response.setResultCode( resultCode ); - response.setResultDescription( "Precondition Failed" ); - return response; - } - - - // [D] Return the contents of the object inline - // -> extract the meta data that corresponds the specific versioned item - SMetaDao metaDao = new SMetaDao(); - List itemMetaData = metaDao.getByTarget( "SObjectItem", item.getId()); - if (null != itemMetaData) - { - int i = 0; - S3MetaDataEntry[] metaEntries = new S3MetaDataEntry[ itemMetaData.size() ]; - ListIterator it = itemMetaData.listIterator(); - while( it.hasNext()) { - SMeta oneTag = (SMeta)it.next(); - S3MetaDataEntry oneEntry = new S3MetaDataEntry(); - oneEntry.setName( oneTag.getName()); - oneEntry.setValue( oneTag.getValue()); - metaEntries[i++] = oneEntry; - } - response.setMetaEntries( metaEntries ); - } - - // -> support a single byte range - if ( 0 <= bytesStart && 0 <= bytesEnd ) { - response.setContentLength( bytesEnd - bytesStart ); - resultCode = 206; - } - else response.setContentLength( item.getStoredSize()); - - if(request.isReturnData()) - { - response.setETag(item.getMd5()); - 
response.setLastModified(DateHelper.toCalendar( item.getLastModifiedTime())); - response.setVersion( item.getVersion()); - if (request.isInlineData()) - { - OrderedPair tupleSHostInfo = getBucketStorageHost(sbucket); - S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(tupleSHostInfo.getFirst()); - - if ( 0 <= bytesStart && 0 <= bytesEnd ) - response.setData(bucketAdapter.loadObjectRange(tupleSHostInfo.getSecond(), - request.getBucketName(), item.getStoredPath(), bytesStart, bytesEnd )); - else response.setData(bucketAdapter.loadObject(tupleSHostInfo.getSecond(), request.getBucketName(), item.getStoredPath())); - } - } - - response.setResultCode( resultCode ); - response.setResultDescription("OK"); - return response; - } - - /** - * Handle object deletion requests, both versioning and non-versioning requirements. - * Called from S3ObjectAction for deletion. - */ - public S3Response handleRequest(S3DeleteObjectRequest request) - { - // Verify that the bucket and object exist - S3Response response = new S3Response(); - SBucketDao bucketDao = new SBucketDao(); - String bucketName = request.getBucketName(); - SBucket sbucket = bucketDao.getByName( bucketName ); - if (sbucket == null) { - response.setResultCode(404); - response.setResultDescription("Bucket " + bucketName + " does not exist"); - return response; - } - - SObjectDao objectDao = new SObjectDao(); - String nameKey = request.getKey(); - SObject sobject = objectDao.getByNameKey( sbucket, nameKey ); - if (sobject == null) { - response.setResultCode(404); - response.setResultDescription("No object with key " + nameKey + " exists in bucket " + bucketName); - return response; - } - - - // Discover whether versioning is enabled. If so versioning requires the setting of a deletion marker. 
- String storedPath = null; - SObjectItem item = null; - int versioningStatus = sbucket.getVersioningStatus(); - if ( SBucket.VERSIONING_ENABLED == versioningStatus ) - { - String wantVersion = request.getVersion(); - S3PolicyContext context = new S3PolicyContext( PolicyActions.DeleteObjectVersion, bucketName ); - context.setKeyName( nameKey ); - context.setEvalParam( ConditionKeys.VersionId, wantVersion ); - verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_WRITE ); - - if (null == wantVersion) { - // If versioning is on and no versionId is given then we just write a deletion marker - sobject.setDeletionMark( UUID.randomUUID().toString()); - objectDao.update( sobject ); - } - else { - // Otherwise remove the deletion marker if this has been set - String deletionMarker = sobject.getDeletionMark(); - if (null != deletionMarker && wantVersion.equalsIgnoreCase( deletionMarker )) { - sobject.setDeletionMark( null ); - objectDao.update( sobject ); - response.setResultCode(204); - return response; - } - - // If versioning is on and the versionId is given (non-null) then delete the object matching that version - if ( null == (item = sobject.getVersion( wantVersion ))) { - response.setResultCode(404); - return response; - } - else { - // Providing versionId is non-null, then just delete the one item that matches the versionId from the database - storedPath = item.getStoredPath(); - sobject.deleteItem( item.getId()); - objectDao.update( sobject ); - } - } - } - else - { // If versioning is off then we do delete the null object - S3PolicyContext context = new S3PolicyContext( PolicyActions.DeleteObject, bucketName ); - context.setKeyName( nameKey ); - verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_WRITE ); - - if ( null == (item = sobject.getLatestVersion( true ))) { - response.setResultCode(404); - return response; - } - else { - // If there is no item with a null version then we are done - if (null == item.getVersion()) { - // 
Otherwiswe remove the entire object - // Cascade-deleting can delete related SObject/SObjectItem objects, but not SAcl and SMeta objects. - storedPath = item.getStoredPath(); - deleteMetaData( item.getId()); - deleteObjectAcls( "SObjectItem", item.getId()); - objectDao.delete( sobject ); - } - } - } - - // Delete the file holding the object - if (null != storedPath) - { - OrderedPair host_storagelocation_pair = getBucketStorageHost( sbucket ); - S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter( host_storagelocation_pair.getFirst()); - bucketAdapter.deleteObject( host_storagelocation_pair.getSecond(), bucketName, storedPath ); - } - - response.setResultCode(204); - return response; - } - - - private void deleteMetaData( long itemId ) { - SMetaDao metaDao = new SMetaDao(); - List itemMetaData = metaDao.getByTarget( "SObjectItem", itemId ); - if (null != itemMetaData) - { - ListIterator it = itemMetaData.listIterator(); - while( it.hasNext()) { - SMeta oneTag = (SMeta)it.next(); - metaDao.delete( oneTag ); - } - } - } - - private void deleteObjectAcls( String target, long itemId ) { - SAclDao aclDao = new SAclDao(); - List itemAclData = aclDao.listGrants( target, itemId ); - if (null != itemAclData) - { - ListIterator it = itemAclData.listIterator(); - while( it.hasNext()) { - SAcl oneTag = (SAcl)it.next(); - aclDao.delete( oneTag ); - } - } - } - - private void deleteBucketAcls( long bucketId ) { - SAclDao aclDao = new SAclDao(); - List bucketAclData = aclDao.listGrants( "SBucket", bucketId ); - if (null != bucketAclData) - { - ListIterator it = bucketAclData.listIterator(); - while( it.hasNext()) { - SAcl oneTag = (SAcl)it.next(); - aclDao.delete( oneTag ); - } - } - } - - private S3ListBucketPrefixEntry[] composeListBucketPrefixEntries(List l, String prefix, String delimiter, int maxKeys) - { - List entries = new ArrayList(); - int count = 0; - - for(SObject sobject : l) - { - if(delimiter != null && !delimiter.isEmpty()) - { - String subName = 
StringHelper.substringInBetween(sobject.getNameKey(), prefix, delimiter); - if(subName != null) - { - S3ListBucketPrefixEntry entry = new S3ListBucketPrefixEntry(); - if ( prefix != null && prefix.length() > 0) - entry.setPrefix(prefix + delimiter + subName); - else entry.setPrefix(subName); - } - } - count++; - if(count >= maxKeys) break; - } - - if(entries.size() > 0) return entries.toArray(new S3ListBucketPrefixEntry[0]); - return null; - } - - /** - * The 'versionIdMarker' parameter only makes sense if enableVersion is true. - * versionIdMarker is the starting point to return information back. So for example if an - * object has versions 1,2,3,4,5 and the versionIdMarker is '3', then 3,4,5 will be returned - * by this function. If the versionIdMarker is null then all versions are returned. - * - * TODO - how does the versionIdMarker work when there is a deletion marker in the object? - */ - private S3ListBucketObjectEntry[] composeListBucketContentEntries(List l, String prefix, String delimiter, int maxKeys, boolean enableVersion, String versionIdMarker) - { - List entries = new ArrayList(); - SObjectItem latest = null; - boolean hitIdMarker = false; - int count = 0; - - for( SObject sobject : l ) - { - if (delimiter != null && !delimiter.isEmpty()) - { - if (StringHelper.substringInBetween(sobject.getNameKey(), prefix, delimiter) != null) - continue; - } - - if (enableVersion) - { - hitIdMarker = (null == versionIdMarker ? 
true : false); - - // This supports GET REST calls with /?versions - String deletionMarker = sobject.getDeletionMark(); - if ( null != deletionMarker ) - { - // TODO we should also save the timestamp when something is deleted - S3ListBucketObjectEntry entry = new S3ListBucketObjectEntry(); - entry.setKey(sobject.getNameKey()); - entry.setVersion( deletionMarker ); - entry.setIsLatest( true ); - entry.setIsDeletionMarker( true ); - entry.setLastModified( Calendar.getInstance( TimeZone.getTimeZone("GMT") )); - entry.setOwnerCanonicalId(sobject.getOwnerCanonicalId()); - entry.setOwnerDisplayName(""); - entries.add( entry ); - latest = null; - } - else latest = sobject.getLatestVersion( false ); - - Iterator it = sobject.getItems().iterator(); - while( it.hasNext()) - { - SObjectItem item = (SObjectItem)it.next(); - - if ( !hitIdMarker ) - { - if (item.getVersion().equalsIgnoreCase( versionIdMarker )) { - hitIdMarker = true; - entries.add( toListEntry( sobject, item, latest )); - } - } - else entries.add( toListEntry( sobject, item, latest )); - } - } - else - { // -> if there are multiple versions of an object then just return its last version - Iterator it = sobject.getItems().iterator(); - SObjectItem lastestItem = null; - int maxVersion = 0; - int version = 0; - while(it.hasNext()) - { - SObjectItem item = (SObjectItem)it.next(); - String versionStr = item.getVersion(); - - if ( null != versionStr ) - version = Integer.parseInt(item.getVersion()); - else lastestItem = item; - - // -> if the bucket has versions turned on - if (version > maxVersion) { - maxVersion = version; - lastestItem = item; - } - } - if (lastestItem != null) { - entries.add( toListEntry( sobject, lastestItem, null )); - } - } - - count++; - if(count >= maxKeys) break; - } - - if ( entries.size() > 0 ) - return entries.toArray(new S3ListBucketObjectEntry[0]); - else return null; - } - - private static S3ListBucketObjectEntry toListEntry( SObject sobject, SObjectItem item, SObjectItem latest ) - 
{ - S3ListBucketObjectEntry entry = new S3ListBucketObjectEntry(); - entry.setKey(sobject.getNameKey()); - entry.setVersion( item.getVersion()); - entry.setETag( "\"" + item.getMd5() + "\"" ); - entry.setSize(item.getStoredSize()); - entry.setStorageClass( "STANDARD" ); - entry.setLastModified(DateHelper.toCalendar(item.getLastModifiedTime())); - entry.setOwnerCanonicalId(sobject.getOwnerCanonicalId()); - entry.setOwnerDisplayName(""); - - if (null != latest && item == latest) entry.setIsLatest( true ); - return entry; - } - - private OrderedPair getBucketStorageHost(SBucket bucket) - { - MHostMountDao mountDao = new MHostMountDao(); - - SHost shost = bucket.getShost(); - if(shost.getHostType() == SHost.STORAGE_HOST_TYPE_LOCAL) { - return new OrderedPair(shost, shost.getExportRoot()); - } - - MHostMount mount = mountDao.getHostMount(ServiceProvider.getInstance().getManagementHostId(), shost.getId()); - if(mount != null) { - return new OrderedPair(shost, mount.getMountPath()); - } - - // need to redirect request to other node - throw new HostNotMountedException("Storage host " + shost.getHost() + " is not locally mounted"); - } - - /** - * Locate the folder to hold upload parts at the same mount point as the upload's final bucket - * location. Create the upload folder dynamically. - * - * @param bucketName - */ - private void createUploadFolder(String bucketName) - { - if (PersistContext.acquireNamedLock("bucket.creation", LOCK_ACQUIRING_TIMEOUT_SECONDS)) - { - try { - allocBucketStorageHost(bucketName, ServiceProvider.getInstance().getMultipartDir()); - } - finally { - PersistContext.releaseNamedLock("bucket.creation"); - } - } - } - - /** - * The overrideName is used to create a hidden storage bucket (folder) in the same location - * as the given bucketName. This can be used to create a folder for parts of a multipart - * upload for the associated bucket. 
- * - * @param bucketName - * @param overrideName - * @return - */ - private OrderedPair allocBucketStorageHost(String bucketName, String overrideName) - { - MHostDao mhostDao = new MHostDao(); - SHostDao shostDao = new SHostDao(); - - MHost mhost = mhostDao.get(ServiceProvider.getInstance().getManagementHostId()); - if(mhost == null) - throw new OutOfServiceException("Temporarily out of service"); - - if(mhost.getMounts().size() > 0) { - Random random = new Random(); - MHostMount[] mounts = (MHostMount[])mhost.getMounts().toArray(); - MHostMount mount = mounts[random.nextInt(mounts.length)]; - S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(mount.getShost()); - bucketAdapter.createContainer(mount.getMountPath(), (null != overrideName ? overrideName : bucketName)); - return new OrderedPair(mount.getShost(), mount.getMountPath()); - } - - // To make things simple, only allow one local mounted storage root TODO - Change in the future - String localStorageRoot = ServiceProvider.getInstance().getStartupProperties().getProperty("storage.root"); - if(localStorageRoot != null) { - SHost localSHost = shostDao.getLocalStorageHost(mhost.getId(), localStorageRoot); - if(localSHost == null) - throw new InternalErrorException("storage.root is configured but not initialized"); - - S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(localSHost); - bucketAdapter.createContainer(localSHost.getExportRoot(),(null != overrideName ? 
overrideName : bucketName)); - return new OrderedPair(localSHost, localStorageRoot); - } - - throw new OutOfStorageException("No storage host is available"); - } - - public S3BucketAdapter getStorageHostBucketAdapter(SHost shost) - { - S3BucketAdapter adapter = bucketAdapters.get(shost.getHostType()); - if(adapter == null) - throw new InternalErrorException("Bucket adapter is not installed for host type: " + shost.getHostType()); - - return adapter; - } - - /** - * If acl is set then the cannedAccessPolicy parameter should be null and is ignored. - * The cannedAccessPolicy parameter is for REST Put requests only where a simple set of ACLs can be - * created with a single header value. Note that we do not currently support "anonymous" un-authenticated - * access in our implementation. - * - * @throws IOException - */ - @SuppressWarnings("deprecation") - public OrderedPair allocObjectItem(SBucket bucket, String nameKey, S3MetaDataEntry[] meta, S3AccessControlList acl, String cannedAccessPolicy) - { - SObjectDao objectDao = new SObjectDao(); - SObjectItemDao objectItemDao = new SObjectItemDao(); - SMetaDao metaDao = new SMetaDao(); - SAclDao aclDao = new SAclDao(); - SObjectItem item = null; - int versionSeq = 1; - int versioningStatus = bucket.getVersioningStatus(); - - Session session = PersistContext.getSession(); - - // [A] To write into a bucket the user must have write permission to that bucket - S3PolicyContext context = new S3PolicyContext( PolicyActions.PutObject, bucket.getName()); - context.setKeyName( nameKey ); - context.setEvalParam( ConditionKeys.Acl, cannedAccessPolicy); - - verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE ); // TODO - check this validates plain POSTs - - // [B] If versioning is off them we over write a null object item - SObject object = objectDao.getByNameKey(bucket, nameKey); - if ( object != null ) - { - // -> if versioning is on create new object items - if ( SBucket.VERSIONING_ENABLED == versioningStatus ) 
- { - session.lock(object, LockMode.UPGRADE); - versionSeq = object.getNextSequence(); - object.setNextSequence(versionSeq + 1); - session.save(object); - - item = new SObjectItem(); - item.setTheObject(object); - object.getItems().add(item); - item.setVersion(String.valueOf(versionSeq)); - Date ts = DateHelper.currentGMTTime(); - item.setCreateTime(ts); - item.setLastAccessTime(ts); - item.setLastModifiedTime(ts); - session.save(item); - } - else - { // -> find an object item with a null version, can be null - // if bucket started out with versioning enabled and was then suspended - item = objectItemDao.getByObjectIdNullVersion( object.getId()); - if (item == null) - { - item = new SObjectItem(); - item.setTheObject(object); - object.getItems().add(item); - Date ts = DateHelper.currentGMTTime(); - item.setCreateTime(ts); - item.setLastAccessTime(ts); - item.setLastModifiedTime(ts); - session.save(item); - } - } - } - else - { // -> there is no object nor an object item - object = new SObject(); - object.setBucket(bucket); - object.setNameKey(nameKey); - object.setNextSequence(2); - object.setCreateTime(DateHelper.currentGMTTime()); - object.setOwnerCanonicalId(UserContext.current().getCanonicalUserId()); - session.save(object); - - item = new SObjectItem(); - item.setTheObject(object); - object.getItems().add(item); - if (SBucket.VERSIONING_ENABLED == versioningStatus) item.setVersion(String.valueOf(versionSeq)); - Date ts = DateHelper.currentGMTTime(); - item.setCreateTime(ts); - item.setLastAccessTime(ts); - item.setLastModifiedTime(ts); - session.save(item); - } - - - // [C] We will use the item DB id as the file name, MD5/contentLength will be stored later - String suffix = null; - int dotPos = nameKey.lastIndexOf('.'); - if (dotPos >= 0) suffix = nameKey.substring(dotPos); - if ( suffix != null ) - item.setStoredPath(String.valueOf(item.getId()) + suffix); - else item.setStoredPath(String.valueOf(item.getId())); - - metaDao.save("SObjectItem", item.getId(), 
meta); - - - // [D] Are we setting an ACL along with the object - // -> the ACL is ALWAYS set on a particular instance of the object (i.e., a version) - if ( null != cannedAccessPolicy ) - { - setCannedAccessControls( cannedAccessPolicy, "SObjectItem", item.getId(), bucket ); - } - else if (null == acl || 0 == acl.size()) - { - // -> this is termed the "private" or default ACL, "Owner gets FULL_CONTROL" - setSingleAcl( "SObjectItem", item.getId(), SAcl.PERMISSION_FULL ); - } - else if (null != acl) { - aclDao.save( "SObjectItem", item.getId(), acl ); - } - - session.update(item); - return new OrderedPair(object, item); - } - - - /** - * Access controls that are specified via the "x-amz-acl:" headers in REST requests. - * Note that canned policies can be set when the object's contents are set - */ - public void setCannedAccessControls( String cannedAccessPolicy, String target, long objectId, SBucket bucket ) - { - // Find the permission and symbol for the principal corresponding to the requested cannedAccessPolicy - Triple permission_permission_symbol_triple = - SAcl.getCannedAccessControls(cannedAccessPolicy, target, bucket.getOwnerCanonicalId()); - if ( null == permission_permission_symbol_triple.getThird() ) - setSingleAcl(target, objectId, permission_permission_symbol_triple.getFirst()); - else - { setDefaultAcls( target, - objectId, - permission_permission_symbol_triple.getFirst(), // permission according to ownership of object - permission_permission_symbol_triple.getSecond(), // permission according to ownership of bucket - permission_permission_symbol_triple.getThird() ); // "symbol" to indicate principal or otherwise name of owner - - } - } - - - private void setSingleAcl( String target, long targetId, int permission ) - { - SAclDao aclDao = new SAclDao(); - S3AccessControlList defaultAcl = new S3AccessControlList(); - - // -> if an annoymous request, then do not rewrite the ACL - String userId = UserContext.current().getCanonicalUserId(); - if (0 < 
userId.length()) - { - S3Grant defaultGrant = new S3Grant(); - defaultGrant.setGrantee(SAcl.GRANTEE_USER); - defaultGrant.setCanonicalUserID( userId ); - defaultGrant.setPermission( permission ); - defaultAcl.addGrant( defaultGrant ); - aclDao.save( target, targetId, defaultAcl ); - } - } - - - /** - * The Cloud Stack API Access key is used for for the Canonical User Id everywhere (buckets and objects). - * - * @param owner - this can be the Cloud Access Key for a bucket owner or one of the - * following special symbols: - * (a) '*' - any principal authenticated user (i.e., any user with a registered Cloud Access Key) - * (b) 'A' - any anonymous principal (i.e., S3 request without an Authorization header) - */ - private void setDefaultAcls( String target, long objectId, int permission1, int permission2, String owner ) - { - SAclDao aclDao = new SAclDao(); - S3AccessControlList defaultAcl = new S3AccessControlList(); - - // -> object owner - S3Grant defaultGrant = new S3Grant(); - defaultGrant.setGrantee(SAcl.GRANTEE_USER); - defaultGrant.setCanonicalUserID( UserContext.current().getCanonicalUserId()); - defaultGrant.setPermission( permission1 ); - defaultAcl.addGrant( defaultGrant ); - - // -> bucket owner - defaultGrant = new S3Grant(); - defaultGrant.setGrantee(SAcl.GRANTEE_USER); - defaultGrant.setCanonicalUserID( owner ); - defaultGrant.setPermission( permission2 ); - defaultAcl.addGrant( defaultGrant ); - aclDao.save( target, objectId, defaultAcl ); - } - - public static PolicyAccess verifyPolicy( S3PolicyContext context ) - { - S3BucketPolicy policy = null; - - // Ordinarily a REST request will pass in an S3PolicyContext for a given bucket by this stage. The HttpServletRequest object - // should be held in the UserContext ready for extraction of the S3BucketPolicy. - // If there is an error in obtaining the request object or in loading the policy then log the failure and return a S3PolicyContext - // which indicates DEFAULT_DENY. 
Where there is no failure, the policy returned should be specific to the Canonical User ID of the requester. - - try { - // -> in SOAP the HttpServletRequest object is hidden and not passed around - if (null != context) { - context.setHttp( UserContext.current().getHttp()); - policy = loadPolicy( context ); - } - - if ( null != policy ) - return policy.eval(context, UserContext.current().getCanonicalUserId()); - else return PolicyAccess.DEFAULT_DENY; - } - catch( Exception e ) { - logger.error("verifyAccess - loadPolicy failed, bucket: " + context.getBucketName() + " policy ignored", e); - return PolicyAccess.DEFAULT_DENY; - } - } - - /** - * To determine access to a bucket or an object in a bucket evaluate first a define - * bucket policy and then any defined ACLs. - * - * @param context - all data needed for bucket policies - * @param target - used for ACL evaluation, object identifier - * @param targetId - used for ACL evaluation - * @param requestedPermission - ACL type access requested - * - * @throws ParseException, SQLException, ClassNotFoundException, IllegalAccessException, InstantiationException - */ - public static void verifyAccess( S3PolicyContext context, String target, long targetId, int requestedPermission ) - { - switch( verifyPolicy( context ) ) { - case ALLOW: // overrides ACLs (?) - return; - - case DENY: - throw new PermissionDeniedException( "Access Denied - bucket policy DENY result" ); - - case DEFAULT_DENY: - default: - accessAllowed( target, targetId, requestedPermission ); - break; - } - } - - /** - * This method verifies that the accessing client has the requested - * permission on the object/bucket/Acl represented by the tuple: - * - * For cases where an ACL is meant for any authenticated user we place a "*" for the - * Canonical User Id. N.B. - "*" is not a legal Cloud (Bridge) Access key. - * - * For cases where an ACL is meant for any anonymous user (or 'AllUsers') we place a "A" for the - * Canonical User Id. N.B. 
- "A" is not a legal Cloud (Bridge) Access key. - */ - public static void accessAllowed( String target, long targetId, int requestedPermission ) - { - if (SAcl.PERMISSION_PASS == requestedPermission) return; - - SAclDao aclDao = new SAclDao(); - - // If an annoymous request, then canonicalUserId is an empty string - String userId = UserContext.current().getCanonicalUserId(); - if ( 0 == userId.length()) - { - // Is an anonymous principal ACL set for this ? - if (hasPermission( aclDao.listGrants( target, targetId, "A" ), requestedPermission )) return; - } - else - { - if (hasPermission( aclDao.listGrants( target, targetId, userId ), requestedPermission )) return; - // Or alternatively is there is any principal authenticated ACL set for this ? - if (hasPermission( aclDao.listGrants( target, targetId, "*" ), requestedPermission )) return; - } - // No privileges implies that no access is allowed in the case of an anonymous user - throw new PermissionDeniedException( "Access Denied - ACLs do not give user the required permission" ); - } - - /** - * This method assumes that the bucket has been tested to make sure it exists before - * it is called. - * - * @param context - * @return S3BucketPolicy - * @throws SQLException, ClassNotFoundException, IllegalAccessException, InstantiationException, ParseException - */ - public static S3BucketPolicy loadPolicy( S3PolicyContext context ) - throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException, ParseException - { - OrderedPair result = ServiceProvider.getInstance().getBucketPolicy( context.getBucketName()); - S3BucketPolicy policy = result.getFirst(); - if ( null == policy ) - { - // -> do we have to load it from the database (any other value means there is no policy)? 
- if (-1 == result.getSecond().intValue()) - { - BucketPolicyDao policyDao = new BucketPolicyDao(); - String policyInJson = policyDao.getPolicy( context.getBucketName()); - // -> place in cache that no policy exists in the database - if (null == policyInJson) { - ServiceProvider.getInstance().setBucketPolicy(context.getBucketName(), null); - return null; - } - - PolicyParser parser = new PolicyParser(); - policy = parser.parse( policyInJson, context.getBucketName()); - if (null != policy) - ServiceProvider.getInstance().setBucketPolicy(context.getBucketName(), policy); - } - } - return policy; - } - - public static void verifyBucketName( String bucketName, boolean useDNSGuidelines ) throws InvalidBucketName - { - // [A] To comply with Amazon S3 basic requirements, bucket names must meet the following conditions - // -> must be between 3 and 255 characters long - int size = bucketName.length(); - if (3 > size || size > 255) - throw new InvalidBucketName( bucketName + " is not between 3 and 255 characters long" ); - - // -> must start with a number or letter - if (!Character.isLetterOrDigit( bucketName.charAt( 0 ))) - throw new InvalidBucketName( bucketName + " does not start with a number or letter" ); - - // -> can contain lowercase letters, numbers, periods (.), underscores (_), and dashes (-) - // -> the bucket name can also contain uppercase letters but it is not recommended - for( int i=0; i < bucketName.length(); i++ ) - { - char next = bucketName.charAt(i); - if (Character.isLetter( next )) continue; - else if (Character.isDigit( next )) continue; - else if ('.' == next) continue; - else if ('_' == next) continue; - else if ('-' == next) continue; - else throw new InvalidBucketName( bucketName + " contains the invalid character: " + next ); - } - - // -> must not be formatted as an IP address (e.g., 192.168.5.4) - String[] parts = bucketName.split( "\\." 
); - if (4 == parts.length) - { - try { - int first = Integer.parseInt( parts[0] ); - int second = Integer.parseInt( parts[1] ); - int third = Integer.parseInt( parts[2] ); - int fourth = Integer.parseInt( parts[3] ); - throw new InvalidBucketName( bucketName + " is formatted as an IP address" ); - } - catch( NumberFormatException e ) - {throw new InvalidBucketName( bucketName);} - } - - - // [B] To conform with DNS requirements, Amazon recommends following these additional guidelines when creating buckets - // -> bucket names should be between 3 and 63 characters long - if (useDNSGuidelines) - { - // -> bucket names should be between 3 and 63 characters long - if (3 > size || size > 63) - throw new InvalidBucketName( "DNS requiremens, bucket name: " + bucketName + " is not between 3 and 63 characters long" ); - - // -> bucket names should not contain underscores (_) - int pos = bucketName.indexOf( '_' ); - if (-1 != pos) - throw new InvalidBucketName( "DNS requiremens, bucket name: " + bucketName + " should not contain underscores" ); - - // -> bucket names should not end with a dash - if (bucketName.endsWith( "-" )) - throw new InvalidBucketName( "DNS requiremens, bucket name: " + bucketName + " should not end with a dash" ); - - // -> bucket names cannot contain two, adjacent periods - pos = bucketName.indexOf( ".." ); - if (-1 != pos) - throw new InvalidBucketName( "DNS requiremens, bucket name: " + bucketName + " should not contain \"..\"" ); - - // -> bucket names cannot contain dashes next to periods (e.g., "my-.bucket.com" and "my.-bucket" are invalid) - if (-1 != bucketName.indexOf( "-." 
) || -1 != bucketName.indexOf( ".-" )) - throw new InvalidBucketName( "DNS requiremens, bucket name: " + bucketName + " should not contain \".-\" or \"-.\"" ); - } - } - - private static boolean hasPermission( List privileges, int requestedPermission ) - { - ListIterator it = privileges.listIterator(); - while( it.hasNext()) - { - // True providing the requested permission is contained in one or the granted rights for this user. False otherwise. - SAcl rights = (SAcl)it.next(); - int permission = rights.getPermission(); - if (requestedPermission == (permission & requestedPermission)) return true; - } - return false; - } - - /** - * ifRange is true and ifUnmodifiedSince or IfMatch fails then we return the entire object (indicated by - * returning a -1 as the function result. - * - * @param ifCond - conditional get defined by these tests - * @param lastModified - value used on ifModifiedSince or ifUnmodifiedSince - * @param ETag - value used on ifMatch and ifNoneMatch - * @param ifRange - using an if-Range HTTP functionality - * @return -1 means return the entire object with an HTTP 200 (not a subrange) - */ - private int conditionPassed( S3ConditionalHeaders ifCond, Date lastModified, String ETag, boolean ifRange ) - { - if (null == ifCond) return 200; - - if (0 > ifCond.ifModifiedSince( lastModified )) - return 304; - - if (0 > ifCond.ifUnmodifiedSince( lastModified )) - return (ifRange ? -1 : 412); - - if (0 > ifCond.ifMatchEtag( ETag )) - return (ifRange ? -1 : 412); - - if (0 > ifCond.ifNoneMatchEtag( ETag )) - return 412; - - return 200; - } -} \ No newline at end of file +/* + * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.cloud.bridge.service.core.s3; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.Date; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.ListIterator; +import java.util.Map; +import java.util.Random; +import java.util.Set; +import java.util.TimeZone; +import java.util.UUID; + +import javax.servlet.http.HttpServletResponse; + +import org.apache.log4j.Logger; +import org.hibernate.LockMode; +import org.hibernate.Session; +import org.json.simple.parser.ParseException; + +import com.cloud.bridge.io.S3FileSystemBucketAdapter; +import com.cloud.bridge.model.MHost; +import com.cloud.bridge.model.MHostMount; +import com.cloud.bridge.model.SAcl; +import com.cloud.bridge.model.SBucket; +import com.cloud.bridge.model.SHost; +import com.cloud.bridge.model.SMeta; +import com.cloud.bridge.model.SObject; +import com.cloud.bridge.model.SObjectItem; +import com.cloud.bridge.persist.PersistContext; +import com.cloud.bridge.persist.dao.BucketPolicyDao; +import com.cloud.bridge.persist.dao.MHostDao; +import com.cloud.bridge.persist.dao.MHostMountDao; +import com.cloud.bridge.persist.dao.MultipartLoadDao; +import com.cloud.bridge.persist.dao.SAclDao; +import com.cloud.bridge.persist.dao.SBucketDao; +import com.cloud.bridge.persist.dao.SHostDao; +import com.cloud.bridge.persist.dao.SMetaDao; +import com.cloud.bridge.persist.dao.SObjectDao; +import 
com.cloud.bridge.persist.dao.SObjectItemDao; +import com.cloud.bridge.service.S3Constants; +import com.cloud.bridge.service.UserContext; +import com.cloud.bridge.service.controller.s3.ServiceProvider; +import com.cloud.bridge.service.core.s3.S3BucketPolicy.PolicyAccess; +import com.cloud.bridge.service.core.s3.S3CopyObjectRequest.MetadataDirective; +import com.cloud.bridge.service.core.s3.S3PolicyAction.PolicyActions; +import com.cloud.bridge.service.core.s3.S3PolicyCondition.ConditionKeys; +import com.cloud.bridge.service.exception.HostNotMountedException; +import com.cloud.bridge.service.exception.InternalErrorException; +import com.cloud.bridge.service.exception.InvalidBucketName; +import com.cloud.bridge.service.exception.NoSuchObjectException; +import com.cloud.bridge.service.exception.ObjectAlreadyExistsException; +import com.cloud.bridge.service.exception.OutOfServiceException; +import com.cloud.bridge.service.exception.OutOfStorageException; +import com.cloud.bridge.service.exception.PermissionDeniedException; +import com.cloud.bridge.service.exception.UnsupportedException; +import com.cloud.bridge.util.DateHelper; +import com.cloud.bridge.util.PolicyParser; +import com.cloud.bridge.util.StringHelper; +import com.cloud.bridge.util.OrderedPair; +import com.cloud.bridge.util.Triple; + +/** + * @author Kelven Yang, John Zucker + * The CRUD control actions to be invoked from S3BucketAction or S3ObjectAction. + */ +public class S3Engine { + protected final static Logger logger = Logger.getLogger(S3Engine.class); + + private final int LOCK_ACQUIRING_TIMEOUT_SECONDS = 10; // ten seconds + + private final Map bucketAdapters = new HashMap(); + + public S3Engine() { + bucketAdapters.put(SHost.STORAGE_HOST_TYPE_LOCAL, new S3FileSystemBucketAdapter()); + } + + + /** + * Return a S3CopyObjectResponse which represents an object being copied from source + * to destination bucket. + * Called from S3ObjectAction when copying an object. 
+ * This can be treated as first a GET followed by a PUT of the object the user wants to copy. + */ + + public S3CopyObjectResponse handleRequest(S3CopyObjectRequest request) + { + S3CopyObjectResponse response = new S3CopyObjectResponse(); + + // [A] Get the object we want to copy + S3GetObjectRequest getRequest = new S3GetObjectRequest(); + getRequest.setBucketName(request.getSourceBucketName()); + getRequest.setKey(request.getSourceKey()); + getRequest.setVersion(request.getVersion()); + getRequest.setConditions( request.getConditions()); + + getRequest.setInlineData( true ); + getRequest.setReturnData( true ); + if ( MetadataDirective.COPY == request.getDirective()) + getRequest.setReturnMetadata( true ); + else getRequest.setReturnMetadata( false ); + + //-> before we do anything verify the permissions on a copy basis + String destinationBucketName = request.getDestinationBucketName(); + String destinationKeyName = request.getDestinationKey(); + S3PolicyContext context = new S3PolicyContext( PolicyActions.PutObject, destinationBucketName ); + context.setKeyName( destinationKeyName ); + context.setEvalParam( ConditionKeys.MetaData, request.getDirective().toString()); + context.setEvalParam( ConditionKeys.CopySource, "/" + request.getSourceBucketName() + "/" + request.getSourceKey()); + if (PolicyAccess.DENY == verifyPolicy( context )) + throw new PermissionDeniedException( "Access Denied - bucket policy DENY result" ); + + S3GetObjectResponse originalObject = handleRequest(getRequest); + int resultCode = originalObject.getResultCode(); + if (200 != resultCode) { + response.setResultCode( resultCode ); + response.setResultDescription( originalObject.getResultDescription()); + return response; + } + + response.setCopyVersion( originalObject.getVersion()); + + + // [B] Put the object into the destination bucket + S3PutObjectInlineRequest putRequest = new S3PutObjectInlineRequest(); + putRequest.setBucketName(request.getDestinationBucketName()) ; + 
putRequest.setKey(destinationKeyName); + if ( MetadataDirective.COPY == request.getDirective()) + putRequest.setMetaEntries(originalObject.getMetaEntries()); + else putRequest.setMetaEntries(request.getMetaEntries()); + putRequest.setAcl(request.getAcl()); // -> if via a SOAP call + putRequest.setCannedAccess(request.getCannedAccess()); // -> if via a REST call + putRequest.setContentLength(originalObject.getContentLength()); + putRequest.setData(originalObject.getData()); + + S3PutObjectInlineResponse putResp = handleRequest(putRequest); + response.setResultCode( putResp.resultCode ); + response.setResultDescription( putResp.getResultDescription()); + response.setETag( putResp.getETag()); + response.setLastModified( putResp.getLastModified()); + response.setPutVersion( putResp.getVersion()); + return response; + } + + public S3CreateBucketResponse handleRequest(S3CreateBucketRequest request) + { + S3CreateBucketResponse response = new S3CreateBucketResponse(); + String cannedAccessPolicy = request.getCannedAccess(); + String bucketName = request.getBucketName(); + response.setBucketName( bucketName ); + + verifyBucketName( bucketName, false ); + + S3PolicyContext context = new S3PolicyContext( PolicyActions.CreateBucket, bucketName ); + context.setEvalParam( ConditionKeys.Acl, cannedAccessPolicy ); + if (PolicyAccess.DENY == verifyPolicy( context )) + throw new PermissionDeniedException( "Access Denied - bucket policy DENY result" ); + + if (PersistContext.acquireNamedLock("bucket.creation", LOCK_ACQUIRING_TIMEOUT_SECONDS)) + { + OrderedPair shost_storagelocation_pair = null; + boolean success = false; + try { + SBucketDao bucketDao = new SBucketDao(); + SAclDao aclDao = new SAclDao(); + + if (bucketDao.getByName(request.getBucketName()) != null) + throw new ObjectAlreadyExistsException("Bucket already exists"); + + shost_storagelocation_pair = allocBucketStorageHost(request.getBucketName(), null); + + SBucket sbucket = new SBucket(); + 
sbucket.setName(request.getBucketName()); + sbucket.setCreateTime(DateHelper.currentGMTTime()); + sbucket.setOwnerCanonicalId( UserContext.current().getCanonicalUserId()); + sbucket.setShost(shost_storagelocation_pair.getFirst()); + shost_storagelocation_pair.getFirst().getBuckets().add(sbucket); + bucketDao.save(sbucket); + + S3AccessControlList acl = request.getAcl(); + + if ( null != cannedAccessPolicy ) + setCannedAccessControls( cannedAccessPolicy, "SBucket", sbucket.getId(), sbucket ); + else if (null != acl) + aclDao.save( "SBucket", sbucket.getId(), acl ); + else setSingleAcl( "SBucket", sbucket.getId(), SAcl.PERMISSION_FULL ); + + // explicitly commit the transaction + PersistContext.commitTransaction(); + success = true; + } + finally + { + if(!success && shost_storagelocation_pair != null) { + S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(shost_storagelocation_pair.getFirst()); + bucketAdapter.deleteContainer(shost_storagelocation_pair.getSecond(), request.getBucketName()); + } + PersistContext.releaseNamedLock("bucket.creation"); + } + + } else { + throw new OutOfServiceException("Unable to acquire synchronization lock"); + } + + return response; + } + + /** + * Return a S3Response which represents the effect of an object being deleted from its bucket. + * Called from S3BucketAction when deleting an object. 
+ */ + + public S3Response handleRequest( S3DeleteBucketRequest request ) + { + S3Response response = new S3Response(); + SBucketDao bucketDao = new SBucketDao(); + String bucketName = request.getBucketName(); + SBucket sbucket = bucketDao.getByName( bucketName ); + + if ( sbucket != null ) + { + S3PolicyContext context = new S3PolicyContext( PolicyActions.DeleteBucket, bucketName ); + switch( verifyPolicy( context )) + { + case ALLOW: + // The bucket policy can give users permission to delete a bucket whereas ACLs cannot + break; + + case DENY: + throw new PermissionDeniedException( "Access Denied - bucket policy DENY result" ); + + case DEFAULT_DENY: + default: + // Irrespective of what the ACLs say, only the owner can delete a bucket + String client = UserContext.current().getCanonicalUserId(); + if (!client.equals( sbucket.getOwnerCanonicalId())) { + throw new PermissionDeniedException( "Access Denied - only the owner can delete a bucket" ); + } + break; + } + + + // Delete the file from its storage location + OrderedPair host_storagelocation_pair = getBucketStorageHost(sbucket); + S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst()); + bucketAdapter.deleteContainer(host_storagelocation_pair.getSecond(), request.getBucketName()); + + // Cascade-deleting can delete related SObject/SObjectItem objects, but not SAcl, SMeta and policy objects. 
+ // To delete SMeta & SAcl objects: + // (1)Get all the objects in the bucket, + // (2)then all the items in each object, + // (3) then all meta & acl data for each item + Set objectsInBucket = sbucket.getObjectsInBucket(); + Iterator it = objectsInBucket.iterator(); + while( it.hasNext()) + { + SObject oneObject = (SObject)it.next(); + Set itemsInObject = oneObject.getItems(); + Iterator is = itemsInObject.iterator(); + while( is.hasNext()) + { + SObjectItem oneItem = (SObjectItem)is.next(); + deleteMetaData( oneItem.getId()); + deleteObjectAcls( "SObjectItem", oneItem.getId()); + } + } + + // Delete all the policy state associated with the bucket + try { + ServiceProvider.getInstance().deleteBucketPolicy( bucketName ); + BucketPolicyDao policyDao = new BucketPolicyDao(); + policyDao.deletePolicy( bucketName ); + } + catch( Exception e ) { + logger.error("When deleting a bucket we must try to delete its policy: ", e); + } + + deleteBucketAcls( sbucket.getId()); + bucketDao.delete( sbucket ); + response.setResultCode(204); + response.setResultDescription("OK"); + } + else + { response.setResultCode(404); + response.setResultDescription("Bucket does not exist"); + } + return response; + } + + /** + * Return a S3ListBucketResponse which represents a list of up to 1000 objects contained ins the bucket. + * Called from S3BucketAction for GETting objects and for GETting object versions. 
+ */ + + public S3ListBucketResponse listBucketContents(S3ListBucketRequest request, boolean includeVersions) + { + S3ListBucketResponse response = new S3ListBucketResponse(); + String bucketName = request.getBucketName(); + String prefix = request.getPrefix(); + if (prefix == null) prefix = StringHelper.EMPTY_STRING; + String marker = request.getMarker(); + if (marker == null) marker = StringHelper.EMPTY_STRING; + + String delimiter = request.getDelimiter(); + int maxKeys = request.getMaxKeys(); + if(maxKeys <= 0) maxKeys = 1000; + + SBucketDao bucketDao = new SBucketDao(); + SBucket sbucket = bucketDao.getByName(bucketName); + if (sbucket == null) throw new NoSuchObjectException("Bucket " + bucketName + " does not exist"); + + PolicyActions action = (includeVersions ? PolicyActions.ListBucketVersions : PolicyActions.ListBucket); + S3PolicyContext context = new S3PolicyContext( action, bucketName ); + context.setEvalParam( ConditionKeys.MaxKeys, new String( "" + maxKeys )); + context.setEvalParam( ConditionKeys.Prefix, prefix ); + context.setEvalParam( ConditionKeys.Delimiter, delimiter ); + verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_READ ); + + + // Wen execting the query, request one more item so that we know how to set isTruncated flag + SObjectDao sobjectDao = new SObjectDao(); + List l = null; + + if ( includeVersions ) + l = sobjectDao.listAllBucketObjects( sbucket, prefix, marker, maxKeys+1 ); + else l = sobjectDao.listBucketObjects( sbucket, prefix, marker, maxKeys+1 ); + + response.setBucketName(bucketName); + response.setMarker(marker); + response.setMaxKeys(maxKeys); + response.setPrefix(prefix); + response.setDelimiter(delimiter); + response.setTruncated(l.size() > maxKeys); + if(l.size() > maxKeys) { + response.setNextMarker(l.get(l.size() - 1).getNameKey()); + } + + // If needed - SOAP response does not support versioning + response.setContents( composeListBucketContentEntries(l, prefix, delimiter, maxKeys, includeVersions, 
request.getVersionIdMarker())); + response.setCommonPrefixes( composeListBucketPrefixEntries(l, prefix, delimiter, maxKeys)); + return response; + } + + /** + * Return a S3ListAllMyBucketResponse which represents a list of all buckets owned by the requester. + * Called from S3BucketAction for GETting all buckets. + * To check on bucket policies defined we have to (look for and) evaluate the policy on each + * bucket the user owns. + */ + public S3ListAllMyBucketsResponse handleRequest(S3ListAllMyBucketsRequest request) + { + S3ListAllMyBucketsResponse response = new S3ListAllMyBucketsResponse(); + SBucketDao bucketDao = new SBucketDao(); + + // "...you can only list buckets for which you are the owner." + List buckets = bucketDao.listBuckets(UserContext.current().getCanonicalUserId()); + S3CanonicalUser owner = new S3CanonicalUser(); + owner.setID(UserContext.current().getCanonicalUserId()); + owner.setDisplayName(""); + response.setOwner(owner); + + if (buckets != null) + { + S3ListAllMyBucketsEntry[] entries = new S3ListAllMyBucketsEntry[buckets.size()]; + int i = 0; + for(SBucket bucket : buckets) + { + String bucketName = bucket.getName(); + S3PolicyContext context = new S3PolicyContext( PolicyActions.ListAllMyBuckets, bucketName ); + verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_PASS ); + + entries[i] = new S3ListAllMyBucketsEntry(); + entries[i].setName(bucketName); + entries[i].setCreationDate(DateHelper.toCalendar(bucket.getCreateTime())); + i++; + } + response.setBuckets(entries); + } + return response; + } + + /** + * Return an S3Response representing the result of PUTTING the ACL of a given bucket. + * Called from S3BucketAction to PUT its ACL. 
+ */ + + public S3Response handleRequest(S3SetBucketAccessControlPolicyRequest request) + { + S3Response response = new S3Response(); + SBucketDao bucketDao = new SBucketDao(); + String bucketName = request.getBucketName(); + SBucket sbucket = bucketDao.getByName(bucketName); + if(sbucket == null) { + response.setResultCode(404); + response.setResultDescription("Bucket does not exist"); + return response; + } + + S3PolicyContext context = new S3PolicyContext( PolicyActions.PutBucketAcl, bucketName ); + verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_WRITE_ACL ); + + SAclDao aclDao = new SAclDao(); + aclDao.save("SBucket", sbucket.getId(), request.getAcl()); + + response.setResultCode(200); + response.setResultDescription("OK"); + return response; + } + + + /** + * Return a S3AccessControlPolicy representing the ACL of a given bucket. + * Called from S3BucketAction to GET its ACL. + */ + + public S3AccessControlPolicy handleRequest(S3GetBucketAccessControlPolicyRequest request) + { + S3AccessControlPolicy policy = new S3AccessControlPolicy(); + SBucketDao bucketDao = new SBucketDao(); + String bucketName = request.getBucketName(); + SBucket sbucket = bucketDao.getByName( bucketName ); + if (sbucket == null) + throw new NoSuchObjectException("Bucket " + bucketName + " does not exist"); + + S3CanonicalUser owner = new S3CanonicalUser(); + owner.setID(sbucket.getOwnerCanonicalId()); + owner.setDisplayName(""); + policy.setOwner(owner); + + S3PolicyContext context = new S3PolicyContext( PolicyActions.GetBucketAcl, bucketName ); + verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_READ_ACL ); + + SAclDao aclDao = new SAclDao(); + List grants = aclDao.listGrants("SBucket", sbucket.getId()); + policy.setGrants(S3Grant.toGrants(grants)); + return policy; + } + + /** + * This method should be called if a multipart upload is aborted OR has completed successfully and + * the individual parts have to be cleaned up. 
+ * Called from S3ObjectAction when executing at completion or when aborting multipart upload. + * @param bucketName + * @param uploadId + * @param verifyPermission - If false then do not check the user's permission to clean up the state + */ + public int freeUploadParts(String bucketName, int uploadId, boolean verifyPermission) + { + // -> we need to look up the final bucket to figure out which mount point to use to save the part in + SBucketDao bucketDao = new SBucketDao(); + SBucket bucket = bucketDao.getByName(bucketName); + if (bucket == null) { + logger.error( "initiateMultipartUpload failed since " + bucketName + " does not exist" ); + return 404; + } + + OrderedPair host_storagelocation_pair = getBucketStorageHost(bucket); + S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst()); + + try { + MultipartLoadDao uploadDao = new MultipartLoadDao(); + OrderedPair exists = uploadDao.multipartExits( uploadId ); + if (null == exists) { + logger.error( "initiateMultipartUpload failed since multipart upload" + uploadId + " does not exist" ); + return 404; + } + + // -> the multipart initiator or bucket owner can do this action by default + if (verifyPermission) + { + String initiator = uploadDao.getInitiator( uploadId ); + if (null == initiator || !initiator.equals( UserContext.current().getAccessKey())) + { + // -> write permission on a bucket allows a PutObject / DeleteObject action on any object in the bucket + S3PolicyContext context = new S3PolicyContext( PolicyActions.AbortMultipartUpload, bucketName ); + context.setKeyName( exists.getSecond()); + verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE ); + } + } + + // -> first get a list of all the uploaded files and delete one by one + S3MultipartPart[] parts = uploadDao.getParts( uploadId, 10000, 0 ); + for( int i=0; i < parts.length; i++ ) + { + bucketAdapter.deleteObject( host_storagelocation_pair.getSecond(), 
ServiceProvider.getInstance().getMultipartDir(), parts[i].getPath()); + } + + uploadDao.deleteUpload( uploadId ); + return 204; + + } + catch( PermissionDeniedException e ) { + logger.error("freeUploadParts failed due to [" + e.getMessage() + "]", e); + throw e; + } + catch (Exception e) { + logger.error("freeUploadParts failed due to [" + e.getMessage() + "]", e); + return 500; + } + } + + /** + * The initiator must have permission to write to the bucket in question in order to initiate + * a multipart upload. Also check to make sure the special folder used to store parts of + * a multipart exists for this bucket. + * Called from S3ObjectAction during many stages of multipart upload. + */ + public S3PutObjectInlineResponse initiateMultipartUpload(S3PutObjectInlineRequest request) + { + S3PutObjectInlineResponse response = new S3PutObjectInlineResponse(); + String bucketName = request.getBucketName(); + String nameKey = request.getKey(); + + // -> does the bucket exist and can we write to it? 
+ SBucketDao bucketDao = new SBucketDao(); + SBucket bucket = bucketDao.getByName(bucketName); + if (bucket == null) { + logger.error( "initiateMultipartUpload failed since " + bucketName + " does not exist" ); + response.setResultCode(404); + } + + S3PolicyContext context = new S3PolicyContext( PolicyActions.PutObject, bucketName ); + context.setKeyName( nameKey ); + context.setEvalParam( ConditionKeys.Acl, request.getCannedAccess()); + verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE ); + + createUploadFolder( bucketName ); + + try { + MultipartLoadDao uploadDao = new MultipartLoadDao(); + int uploadId = uploadDao.initiateUpload( UserContext.current().getAccessKey(), bucketName, nameKey, request.getCannedAccess(), request.getMetaEntries()); + response.setUploadId( uploadId ); + response.setResultCode(200); + + } catch( Exception e ) { + logger.error("initiateMultipartUpload exception: ", e); + response.setResultCode(500); + } + + return response; + } + + /** + * Save the object fragment in a special (i.e., hidden) directory inside the same mount point as + * the bucket location that the final object will be stored in. + * Called from S3ObjectAction during many stages of multipart upload. 
+ * @param request + * @param uploadId + * @param partNumber + * @return S3PutObjectInlineResponse + */ + public S3PutObjectInlineResponse saveUploadPart(S3PutObjectInlineRequest request, int uploadId, int partNumber) + { + S3PutObjectInlineResponse response = new S3PutObjectInlineResponse(); + String bucketName = request.getBucketName(); + + // -> we need to look up the final bucket to figure out which mount point to use to save the part in + SBucketDao bucketDao = new SBucketDao(); + SBucket bucket = bucketDao.getByName(bucketName); + if (bucket == null) { + logger.error( "saveUploadedPart failed since " + bucketName + " does not exist" ); + response.setResultCode(404); + } + S3PolicyContext context = new S3PolicyContext( PolicyActions.PutObject, bucketName ); + context.setKeyName( request.getKey()); + verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE ); + + OrderedPair host_storagelocation_pair = getBucketStorageHost(bucket); + S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst()); + String itemFileName = new String( uploadId + "-" + partNumber ); + InputStream is = null; + + try { + is = request.getDataInputStream(); + String md5Checksum = bucketAdapter.saveObject(is, host_storagelocation_pair.getSecond(), ServiceProvider.getInstance().getMultipartDir(), itemFileName); + response.setETag(md5Checksum); + + MultipartLoadDao uploadDao = new MultipartLoadDao(); + uploadDao.savePart( uploadId, partNumber, md5Checksum, itemFileName, (int)request.getContentLength()); + response.setResultCode(200); + + } catch (IOException e) { + logger.error("UploadPart failed due to " + e.getMessage(), e); + response.setResultCode(500); + } catch (OutOfStorageException e) { + logger.error("UploadPart failed due to " + e.getMessage(), e); + response.setResultCode(500); + } catch (Exception e) { + logger.error("UploadPart failed due to " + e.getMessage(), e); + response.setResultCode(500); + } finally { + if(is != null) 
{ + try { + is.close(); + } catch (IOException e) { + logger.error("UploadPart unable to close stream from data handler.", e); + } + } + } + + return response; + } + + /** + * Create the real object represented by all the parts of the multipart upload. + * Called from S3ObjectAction at completion of multipart upload. + * @param httpResp - Servlet response handle to return the headers of the response (including version header) + * @param request - Normal parameters needed to create a new object (including metadata) + * @param parts - List of files that make up the multipart + * @param outputStream - Response output stream + * N.B. - This method can be long-lasting + * We are required to keep the connection alive by returning whitespace characters back periodically. + */ + + public S3PutObjectInlineResponse concatentateMultipartUploads(HttpServletResponse httpResp, S3PutObjectInlineRequest request, S3MultipartPart[] parts, OutputStream outputStream) throws IOException + { + // [A] Set up and initial error checking + S3PutObjectInlineResponse response = new S3PutObjectInlineResponse(); + String bucketName = request.getBucketName(); + String key = request.getKey(); + S3MetaDataEntry[] meta = request.getMetaEntries(); + + SBucketDao bucketDao = new SBucketDao(); + SBucket bucket = bucketDao.getByName(bucketName); + if (bucket == null) { + logger.error( "completeMultipartUpload( failed since " + bucketName + " does not exist" ); + response.setResultCode(404); + } + + // [B] Now we need to create the final re-assembled object + // -> the allocObjectItem checks for the bucket policy PutObject permissions + OrderedPair object_objectitem_pair = allocObjectItem(bucket, key, meta, null, request.getCannedAccess()); + OrderedPair host_storagelocation_pair = getBucketStorageHost(bucket); + + S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst()); + String itemFileName = object_objectitem_pair.getSecond().getStoredPath(); + + // -> Amazon 
defines that we must return a 200 response immediately to the client, but + // -> we don't know the version header until we hit here + httpResp.setStatus(200); + httpResp.setContentType("text/xml; charset=UTF-8"); + String version = object_objectitem_pair.getSecond().getVersion(); + if (null != version) httpResp.addHeader( "x-amz-version-id", version ); + httpResp.flushBuffer(); + + + // [C] Re-assemble the object from its uploaded file parts + try { + // explicit transaction control to avoid holding transaction during long file concatenation process + PersistContext.commitTransaction(); + + OrderedPair result = bucketAdapter. + concatentateObjects + ( host_storagelocation_pair.getSecond(), + bucket.getName(), + itemFileName, + ServiceProvider.getInstance().getMultipartDir(), + parts, + outputStream ); + response.setETag(result.getFirst()); + response.setLastModified(DateHelper.toCalendar( object_objectitem_pair.getSecond().getLastModifiedTime())); + + SObjectItemDao itemDao = new SObjectItemDao(); + SObjectItem item = itemDao.get( object_objectitem_pair.getSecond().getId()); + item.setMd5(result.getFirst()); + item.setStoredSize(result.getSecond().longValue()); + response.setResultCode(200); + + PersistContext.getSession().save(item); + } + catch (Exception e) { + logger.error("completeMultipartUpload failed due to " + e.getMessage(), e); + } + return response; + } + + /** + * Return a S3PutObjectInlineResponse which represents an object being created into a bucket + * Called from S3ObjectAction when PUTting or POTing an object. 
+ */ + + public S3PutObjectInlineResponse handleRequest(S3PutObjectInlineRequest request) + { + S3PutObjectInlineResponse response = new S3PutObjectInlineResponse(); + String bucketName = request.getBucketName(); + String key = request.getKey(); + long contentLength = request.getContentLength(); + S3MetaDataEntry[] meta = request.getMetaEntries(); + S3AccessControlList acl = request.getAcl(); + + SBucketDao bucketDao = new SBucketDao(); + SBucket bucket = bucketDao.getByName(bucketName); + if (bucket == null) throw new NoSuchObjectException("Bucket " + bucketName + " does not exist"); + + + // Is the caller allowed to write the object? + // The allocObjectItem checks for the bucket policy PutObject permissions + OrderedPair object_objectitem_pair = allocObjectItem(bucket, key, meta, acl, request.getCannedAccess()); + OrderedPair host_storagelocation_pair = getBucketStorageHost(bucket); + + S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst()); + String itemFileName = object_objectitem_pair.getSecond().getStoredPath(); + InputStream is = null; + + try { + // explicit transaction control to avoid holding transaction during file-copy process + PersistContext.commitTransaction(); + + is = request.getDataInputStream(); + String md5Checksum = bucketAdapter.saveObject(is, host_storagelocation_pair.getSecond(), bucket.getName(), itemFileName); + response.setETag(md5Checksum); + response.setLastModified(DateHelper.toCalendar( object_objectitem_pair.getSecond().getLastModifiedTime())); + response.setVersion( object_objectitem_pair.getSecond().getVersion()); + + SObjectItemDao itemDao = new SObjectItemDao(); + SObjectItem item = itemDao.get( object_objectitem_pair.getSecond().getId()); + item.setMd5(md5Checksum); + item.setStoredSize(contentLength); + PersistContext.getSession().save(item); + + } catch (IOException e) { + logger.error("PutObjectInline failed due to " + e.getMessage(), e); + } catch (OutOfStorageException e) { + 
logger.error("PutObjectInline failed due to " + e.getMessage(), e); + } finally { + if(is != null) { + try { + is.close(); + } catch (IOException e) { + logger.error("PutObjectInline unable to close stream from data handler.", e); + } + } + } + + return response; + } + + /** + * Return a S3PutObjectResponse which represents an object being created into a bucket + * Called from S3RestServlet when processing a DIME request. + */ + + public S3PutObjectResponse handleRequest(S3PutObjectRequest request) + { + S3PutObjectResponse response = new S3PutObjectResponse(); + String bucketName = request.getBucketName(); + String key = request.getKey(); + long contentLength = request.getContentLength(); + S3MetaDataEntry[] meta = request.getMetaEntries(); + S3AccessControlList acl = request.getAcl(); + + SBucketDao bucketDao = new SBucketDao(); + SBucket bucket = bucketDao.getByName(bucketName); + if(bucket == null) throw new NoSuchObjectException("Bucket " + bucketName + " does not exist"); + + // Is the caller allowed to write the object? 
+ // The allocObjectItem checks for the bucket policy PutObject permissions + OrderedPair object_objectitem_pair = allocObjectItem(bucket, key, meta, acl, null); + OrderedPair host_storagelocation_pair = getBucketStorageHost(bucket); + + S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst()); + String itemFileName = object_objectitem_pair.getSecond().getStoredPath(); + InputStream is = null; + try { + // explicit transaction control to avoid holding transaction during file-copy process + PersistContext.commitTransaction(); + + is = request.getInputStream(); + String md5Checksum = bucketAdapter.saveObject(is, host_storagelocation_pair.getSecond(), bucket.getName(), itemFileName); + response.setETag(md5Checksum); + response.setLastModified(DateHelper.toCalendar( object_objectitem_pair.getSecond().getLastModifiedTime())); + + SObjectItemDao itemDao = new SObjectItemDao(); + SObjectItem item = itemDao.get( object_objectitem_pair.getSecond().getId()); + item.setMd5(md5Checksum); + item.setStoredSize(contentLength); + PersistContext.getSession().save(item); + + } catch (OutOfStorageException e) { + logger.error("PutObject failed due to " + e.getMessage(), e); + } finally { + if(is != null) { + try { + is.close(); + } catch (IOException e) { + logger.error("Unable to close stream from data handler.", e); + } + } + } + + return response; + } + + /** + * The ACL of an object is set at the object version level. By default, PUT sets the ACL of the latest + * version of an object. To set the ACL of a different version, using the versionId subresource. + * Called from S3ObjectAction to PUT an object's ACL. 
+ */ + + public S3Response handleRequest(S3SetObjectAccessControlPolicyRequest request) + { + S3PolicyContext context = null; + + // [A] First find the object in the bucket + S3Response response = new S3Response(); + SBucketDao bucketDao = new SBucketDao(); + String bucketName = request.getBucketName(); + SBucket sbucket = bucketDao.getByName( bucketName ); + if(sbucket == null) { + response.setResultCode(404); + response.setResultDescription("Bucket " + bucketName + "does not exist"); + return response; + } + + SObjectDao sobjectDao = new SObjectDao(); + String nameKey = request.getKey(); + SObject sobject = sobjectDao.getByNameKey( sbucket, nameKey ); + if(sobject == null) { + response.setResultCode(404); + response.setResultDescription("Object " + request.getKey() + " in bucket " + bucketName + " does not exist"); + return response; + } + + String deletionMark = sobject.getDeletionMark(); + if (null != deletionMark) { + response.setResultCode(404); + response.setResultDescription("Object " + request.getKey() + " has been deleted (1)"); + return response; + } + + + // [B] Versioning allow the client to ask for a specific version not just the latest + SObjectItem item = null; + int versioningStatus = sbucket.getVersioningStatus(); + String wantVersion = request.getVersion(); + if ( SBucket.VERSIONING_ENABLED == versioningStatus && null != wantVersion) + item = sobject.getVersion( wantVersion ); + else item = sobject.getLatestVersion(( SBucket.VERSIONING_ENABLED != versioningStatus )); + + if (item == null) { + response.setResultCode(404); + response.setResultDescription("Object " + request.getKey() + " has been deleted (2)"); + return response; + } + + if ( SBucket.VERSIONING_ENABLED == versioningStatus ) { + context = new S3PolicyContext( PolicyActions.PutObjectAclVersion, bucketName ); + context.setEvalParam( ConditionKeys.VersionId, wantVersion ); + response.setVersion( item.getVersion()); + } + else context = new S3PolicyContext( PolicyActions.PutObjectAcl, 
bucketName ); + context.setKeyName( nameKey ); + verifyAccess( context, "SObjectItem", item.getId(), SAcl.PERMISSION_WRITE_ACL ); + + // -> the acl always goes on the instance of the object + SAclDao aclDao = new SAclDao(); + aclDao.save("SObjectItem", item.getId(), request.getAcl()); + + response.setResultCode(200); + response.setResultDescription("OK"); + return response; + } + + /** + * By default, GET returns ACL information about the latest version of an object. To return ACL + * information about a different version, use the versionId subresource + * Called from S3ObjectAction to get an object's ACL. + */ + + public S3AccessControlPolicy handleRequest(S3GetObjectAccessControlPolicyRequest request) + { + S3PolicyContext context = null; + + // [A] Does the object exist that holds the ACL we are looking for? + S3AccessControlPolicy policy = new S3AccessControlPolicy(); + SBucketDao bucketDao = new SBucketDao(); + String bucketName = request.getBucketName(); + SBucket sbucket = bucketDao.getByName( bucketName ); + if (sbucket == null) + throw new NoSuchObjectException("Bucket " + bucketName + " does not exist"); + + SObjectDao sobjectDao = new SObjectDao(); + String nameKey = request.getKey(); + SObject sobject = sobjectDao.getByNameKey( sbucket, nameKey ); + if (sobject == null) + throw new NoSuchObjectException("Object " + request.getKey() + " does not exist"); + + String deletionMark = sobject.getDeletionMark(); + if (null != deletionMark) { + policy.setResultCode(404); + policy.setResultDescription("Object " + request.getKey() + " has been deleted (1)"); + return policy; + } + + + // [B] Versioning allow the client to ask for a specific version not just the latest + SObjectItem item = null; + int versioningStatus = sbucket.getVersioningStatus(); + String wantVersion = request.getVersion(); + if ( SBucket.VERSIONING_ENABLED == versioningStatus && null != wantVersion) + item = sobject.getVersion( wantVersion ); + else item = sobject.getLatestVersion(( 
SBucket.VERSIONING_ENABLED != versioningStatus )); + + if (item == null) { + policy.setResultCode(404); + policy.setResultDescription("Object " + request.getKey() + " has been deleted (2)"); + return policy; + } + + if ( SBucket.VERSIONING_ENABLED == versioningStatus ) { + context = new S3PolicyContext( PolicyActions.GetObjectVersionAcl, bucketName ); + context.setEvalParam( ConditionKeys.VersionId, wantVersion ); + policy.setVersion( item.getVersion()); + } + else context = new S3PolicyContext( PolicyActions.GetObjectAcl, bucketName ); + context.setKeyName( nameKey ); + verifyAccess( context, "SObjectItem", item.getId(), SAcl.PERMISSION_READ_ACL ); + + + // [C] ACLs are ALWAYS on an instance of the object + S3CanonicalUser owner = new S3CanonicalUser(); + owner.setID(sobject.getOwnerCanonicalId()); + owner.setDisplayName(""); + policy.setOwner(owner); + policy.setResultCode(200); + + SAclDao aclDao = new SAclDao(); + List grants = aclDao.listGrants( "SObjectItem", item.getId()); + policy.setGrants(S3Grant.toGrants(grants)); + return policy; + } + + /** + * Handle requests for GET object and HEAD "get object extended" + * Called from S3ObjectAction for GET and HEAD of an object. 
+ */ + + public S3GetObjectResponse handleRequest(S3GetObjectRequest request) + { + S3GetObjectResponse response = new S3GetObjectResponse(); + S3PolicyContext context = null; + boolean ifRange = false; + long bytesStart = request.getByteRangeStart(); + long bytesEnd = request.getByteRangeEnd(); + int resultCode = 200; + + // [A] Verify that the bucket and the object exist + SBucketDao bucketDao = new SBucketDao(); + String bucketName = request.getBucketName(); + SBucket sbucket = bucketDao.getByName(bucketName); + if (sbucket == null) { + response.setResultCode(404); + response.setResultDescription("Bucket " + request.getBucketName() + " does not exist"); + return response; + } + + SObjectDao objectDao = new SObjectDao(); + String nameKey = request.getKey(); + SObject sobject = objectDao.getByNameKey( sbucket, nameKey ); + if (sobject == null) { + response.setResultCode(404); + response.setResultDescription("Object " + request.getKey() + " does not exist in bucket " + request.getBucketName()); + return response; + } + + String deletionMark = sobject.getDeletionMark(); + if (null != deletionMark) { + response.setDeleteMarker( deletionMark ); + response.setResultCode(404); + response.setResultDescription("Object " + request.getKey() + " has been deleted (1)"); + return response; + } + + + // [B] Versioning allow the client to ask for a specific version not just the latest + SObjectItem item = null; + int versioningStatus = sbucket.getVersioningStatus(); + String wantVersion = request.getVersion(); + if ( SBucket.VERSIONING_ENABLED == versioningStatus && null != wantVersion) + item = sobject.getVersion( wantVersion ); + else item = sobject.getLatestVersion(( SBucket.VERSIONING_ENABLED != versioningStatus )); + + if (item == null) { + response.setResultCode(404); + response.setResultDescription("Object " + request.getKey() + " has been deleted (2)"); + return response; + } + + if ( SBucket.VERSIONING_ENABLED == versioningStatus ) { + context = new S3PolicyContext( 
PolicyActions.GetObjectVersion, bucketName ); + context.setEvalParam( ConditionKeys.VersionId, wantVersion ); + } + else context = new S3PolicyContext( PolicyActions.GetObject, bucketName ); + context.setKeyName( nameKey ); + verifyAccess( context, "SObjectItem", item.getId(), SAcl.PERMISSION_READ ); + + + // [C] Handle all the IFModifiedSince ... conditions, and access privileges + // -> http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.27 (HTTP If-Range header) + if (request.isReturnCompleteObjectOnConditionFailure() && (0 <= bytesStart && 0 <= bytesEnd)) ifRange = true; + + resultCode = conditionPassed( request.getConditions(), item.getLastModifiedTime(), item.getMd5(), ifRange ); + if ( -1 == resultCode ) { + // -> If-Range implementation, we have to return the entire object + resultCode = 200; + bytesStart = -1; + bytesEnd = -1; + } + else if (200 != resultCode) { + response.setResultCode( resultCode ); + response.setResultDescription( "Precondition Failed" ); + return response; + } + + + // [D] Return the contents of the object inline + // -> extract the meta data that corresponds the specific versioned item + SMetaDao metaDao = new SMetaDao(); + List itemMetaData = metaDao.getByTarget( "SObjectItem", item.getId()); + if (null != itemMetaData) + { + int i = 0; + S3MetaDataEntry[] metaEntries = new S3MetaDataEntry[ itemMetaData.size() ]; + ListIterator it = itemMetaData.listIterator(); + while( it.hasNext()) { + SMeta oneTag = (SMeta)it.next(); + S3MetaDataEntry oneEntry = new S3MetaDataEntry(); + oneEntry.setName( oneTag.getName()); + oneEntry.setValue( oneTag.getValue()); + metaEntries[i++] = oneEntry; + } + response.setMetaEntries( metaEntries ); + } + + // -> support a single byte range + if ( 0 <= bytesStart && 0 <= bytesEnd ) { + response.setContentLength( bytesEnd - bytesStart ); + resultCode = 206; + } + else response.setContentLength( item.getStoredSize()); + + if(request.isReturnData()) + { + response.setETag(item.getMd5()); + 
response.setLastModified(DateHelper.toCalendar( item.getLastModifiedTime())); + response.setVersion( item.getVersion()); + if (request.isInlineData()) + { + OrderedPair tupleSHostInfo = getBucketStorageHost(sbucket); + S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(tupleSHostInfo.getFirst()); + + if ( 0 <= bytesStart && 0 <= bytesEnd ) + response.setData(bucketAdapter.loadObjectRange(tupleSHostInfo.getSecond(), + request.getBucketName(), item.getStoredPath(), bytesStart, bytesEnd )); + else response.setData(bucketAdapter.loadObject(tupleSHostInfo.getSecond(), request.getBucketName(), item.getStoredPath())); + } + } + + response.setResultCode( resultCode ); + response.setResultDescription("OK"); + return response; + } + + /** + * Handle object deletion requests, both versioning and non-versioning requirements. + * Called from S3ObjectAction for deletion. + */ + public S3Response handleRequest(S3DeleteObjectRequest request) + { + // Verify that the bucket and object exist + S3Response response = new S3Response(); + SBucketDao bucketDao = new SBucketDao(); + String bucketName = request.getBucketName(); + SBucket sbucket = bucketDao.getByName( bucketName ); + if (sbucket == null) { + response.setResultCode(404); + response.setResultDescription("Bucket " + bucketName + " does not exist"); + return response; + } + + SObjectDao objectDao = new SObjectDao(); + String nameKey = request.getKey(); + SObject sobject = objectDao.getByNameKey( sbucket, nameKey ); + if (sobject == null) { + response.setResultCode(404); + response.setResultDescription("No object with key " + nameKey + " exists in bucket " + bucketName); + return response; + } + + + // Discover whether versioning is enabled. If so versioning requires the setting of a deletion marker. 
+ String storedPath = null; + SObjectItem item = null; + int versioningStatus = sbucket.getVersioningStatus(); + if ( SBucket.VERSIONING_ENABLED == versioningStatus ) + { + String wantVersion = request.getVersion(); + S3PolicyContext context = new S3PolicyContext( PolicyActions.DeleteObjectVersion, bucketName ); + context.setKeyName( nameKey ); + context.setEvalParam( ConditionKeys.VersionId, wantVersion ); + verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_WRITE ); + + if (null == wantVersion) { + // If versioning is on and no versionId is given then we just write a deletion marker + sobject.setDeletionMark( UUID.randomUUID().toString()); + objectDao.update( sobject ); + } + else { + // Otherwise remove the deletion marker if this has been set + String deletionMarker = sobject.getDeletionMark(); + if (null != deletionMarker && wantVersion.equalsIgnoreCase( deletionMarker )) { + sobject.setDeletionMark( null ); + objectDao.update( sobject ); + response.setResultCode(204); + return response; + } + + // If versioning is on and the versionId is given (non-null) then delete the object matching that version + if ( null == (item = sobject.getVersion( wantVersion ))) { + response.setResultCode(404); + return response; + } + else { + // Providing versionId is non-null, then just delete the one item that matches the versionId from the database + storedPath = item.getStoredPath(); + sobject.deleteItem( item.getId()); + objectDao.update( sobject ); + } + } + } + else + { // If versioning is off then we do delete the null object + S3PolicyContext context = new S3PolicyContext( PolicyActions.DeleteObject, bucketName ); + context.setKeyName( nameKey ); + verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_WRITE ); + + if ( null == (item = sobject.getLatestVersion( true ))) { + response.setResultCode(404); + return response; + } + else { + // If there is no item with a null version then we are done + if (null == item.getVersion()) { + // 
Otherwiswe remove the entire object + // Cascade-deleting can delete related SObject/SObjectItem objects, but not SAcl and SMeta objects. + storedPath = item.getStoredPath(); + deleteMetaData( item.getId()); + deleteObjectAcls( "SObjectItem", item.getId()); + objectDao.delete( sobject ); + } + } + } + + // Delete the file holding the object + if (null != storedPath) + { + OrderedPair host_storagelocation_pair = getBucketStorageHost( sbucket ); + S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter( host_storagelocation_pair.getFirst()); + bucketAdapter.deleteObject( host_storagelocation_pair.getSecond(), bucketName, storedPath ); + } + + response.setResultCode(204); + return response; + } + + + private void deleteMetaData( long itemId ) { + SMetaDao metaDao = new SMetaDao(); + List itemMetaData = metaDao.getByTarget( "SObjectItem", itemId ); + if (null != itemMetaData) + { + ListIterator it = itemMetaData.listIterator(); + while( it.hasNext()) { + SMeta oneTag = (SMeta)it.next(); + metaDao.delete( oneTag ); + } + } + } + + private void deleteObjectAcls( String target, long itemId ) { + SAclDao aclDao = new SAclDao(); + List itemAclData = aclDao.listGrants( target, itemId ); + if (null != itemAclData) + { + ListIterator it = itemAclData.listIterator(); + while( it.hasNext()) { + SAcl oneTag = (SAcl)it.next(); + aclDao.delete( oneTag ); + } + } + } + + private void deleteBucketAcls( long bucketId ) { + SAclDao aclDao = new SAclDao(); + List bucketAclData = aclDao.listGrants( "SBucket", bucketId ); + if (null != bucketAclData) + { + ListIterator it = bucketAclData.listIterator(); + while( it.hasNext()) { + SAcl oneTag = (SAcl)it.next(); + aclDao.delete( oneTag ); + } + } + } + + private S3ListBucketPrefixEntry[] composeListBucketPrefixEntries(List l, String prefix, String delimiter, int maxKeys) + { + List entries = new ArrayList(); + int count = 0; + + for(SObject sobject : l) + { + if(delimiter != null && !delimiter.isEmpty()) + { + String subName = 
StringHelper.substringInBetween(sobject.getNameKey(), prefix, delimiter); + if(subName != null) + { + S3ListBucketPrefixEntry entry = new S3ListBucketPrefixEntry(); + if ( prefix != null && prefix.length() > 0) + entry.setPrefix(prefix + delimiter + subName); + else entry.setPrefix(subName); + } + } + count++; + if(count >= maxKeys) break; + } + + if(entries.size() > 0) return entries.toArray(new S3ListBucketPrefixEntry[0]); + return null; + } + + /** + * The 'versionIdMarker' parameter only makes sense if enableVersion is true. + * versionIdMarker is the starting point to return information back. So for example if an + * object has versions 1,2,3,4,5 and the versionIdMarker is '3', then 3,4,5 will be returned + * by this function. If the versionIdMarker is null then all versions are returned. + * + * TODO - how does the versionIdMarker work when there is a deletion marker in the object? + */ + private S3ListBucketObjectEntry[] composeListBucketContentEntries(List l, String prefix, String delimiter, int maxKeys, boolean enableVersion, String versionIdMarker) + { + List entries = new ArrayList(); + SObjectItem latest = null; + boolean hitIdMarker = false; + int count = 0; + + for( SObject sobject : l ) + { + if (delimiter != null && !delimiter.isEmpty()) + { + if (StringHelper.substringInBetween(sobject.getNameKey(), prefix, delimiter) != null) + continue; + } + + if (enableVersion) + { + hitIdMarker = (null == versionIdMarker ? 
true : false); + + // This supports GET REST calls with /?versions + String deletionMarker = sobject.getDeletionMark(); + if ( null != deletionMarker ) + { + // TODO we should also save the timestamp when something is deleted + S3ListBucketObjectEntry entry = new S3ListBucketObjectEntry(); + entry.setKey(sobject.getNameKey()); + entry.setVersion( deletionMarker ); + entry.setIsLatest( true ); + entry.setIsDeletionMarker( true ); + entry.setLastModified( Calendar.getInstance( TimeZone.getTimeZone("GMT") )); + entry.setOwnerCanonicalId(sobject.getOwnerCanonicalId()); + entry.setOwnerDisplayName(""); + entries.add( entry ); + latest = null; + } + else latest = sobject.getLatestVersion( false ); + + Iterator it = sobject.getItems().iterator(); + while( it.hasNext()) + { + SObjectItem item = (SObjectItem)it.next(); + + if ( !hitIdMarker ) + { + if (item.getVersion().equalsIgnoreCase( versionIdMarker )) { + hitIdMarker = true; + entries.add( toListEntry( sobject, item, latest )); + } + } + else entries.add( toListEntry( sobject, item, latest )); + } + } + else + { // -> if there are multiple versions of an object then just return its last version + Iterator it = sobject.getItems().iterator(); + SObjectItem lastestItem = null; + int maxVersion = 0; + int version = 0; + while(it.hasNext()) + { + SObjectItem item = (SObjectItem)it.next(); + String versionStr = item.getVersion(); + + if ( null != versionStr ) + version = Integer.parseInt(item.getVersion()); + else lastestItem = item; + + // -> if the bucket has versions turned on + if (version > maxVersion) { + maxVersion = version; + lastestItem = item; + } + } + if (lastestItem != null) { + entries.add( toListEntry( sobject, lastestItem, null )); + } + } + + count++; + if(count >= maxKeys) break; + } + + if ( entries.size() > 0 ) + return entries.toArray(new S3ListBucketObjectEntry[0]); + else return null; + } + + private static S3ListBucketObjectEntry toListEntry( SObject sobject, SObjectItem item, SObjectItem latest ) + 
{ + S3ListBucketObjectEntry entry = new S3ListBucketObjectEntry(); + entry.setKey(sobject.getNameKey()); + entry.setVersion( item.getVersion()); + entry.setETag( "\"" + item.getMd5() + "\"" ); + entry.setSize(item.getStoredSize()); + entry.setStorageClass( "STANDARD" ); + entry.setLastModified(DateHelper.toCalendar(item.getLastModifiedTime())); + entry.setOwnerCanonicalId(sobject.getOwnerCanonicalId()); + entry.setOwnerDisplayName(""); + + if (null != latest && item == latest) entry.setIsLatest( true ); + return entry; + } + + private OrderedPair getBucketStorageHost(SBucket bucket) + { + MHostMountDao mountDao = new MHostMountDao(); + + SHost shost = bucket.getShost(); + if(shost.getHostType() == SHost.STORAGE_HOST_TYPE_LOCAL) { + return new OrderedPair(shost, shost.getExportRoot()); + } + + MHostMount mount = mountDao.getHostMount(ServiceProvider.getInstance().getManagementHostId(), shost.getId()); + if(mount != null) { + return new OrderedPair(shost, mount.getMountPath()); + } + + // need to redirect request to other node + throw new HostNotMountedException("Storage host " + shost.getHost() + " is not locally mounted"); + } + + /** + * Locate the folder to hold upload parts at the same mount point as the upload's final bucket + * location. Create the upload folder dynamically. + * + * @param bucketName + */ + private void createUploadFolder(String bucketName) + { + if (PersistContext.acquireNamedLock("bucket.creation", LOCK_ACQUIRING_TIMEOUT_SECONDS)) + { + try { + allocBucketStorageHost(bucketName, ServiceProvider.getInstance().getMultipartDir()); + } + finally { + PersistContext.releaseNamedLock("bucket.creation"); + } + } + } + + /** + * The overrideName is used to create a hidden storage bucket (folder) in the same location + * as the given bucketName. This can be used to create a folder for parts of a multipart + * upload for the associated bucket. 
+ * + * @param bucketName + * @param overrideName + * @return + */ + private OrderedPair allocBucketStorageHost(String bucketName, String overrideName) + { + MHostDao mhostDao = new MHostDao(); + SHostDao shostDao = new SHostDao(); + + MHost mhost = mhostDao.get(ServiceProvider.getInstance().getManagementHostId()); + if(mhost == null) + throw new OutOfServiceException("Temporarily out of service"); + + if(mhost.getMounts().size() > 0) { + Random random = new Random(); + MHostMount[] mounts = (MHostMount[])mhost.getMounts().toArray(); + MHostMount mount = mounts[random.nextInt(mounts.length)]; + S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(mount.getShost()); + bucketAdapter.createContainer(mount.getMountPath(), (null != overrideName ? overrideName : bucketName)); + return new OrderedPair(mount.getShost(), mount.getMountPath()); + } + + // To make things simple, only allow one local mounted storage root TODO - Change in the future + String localStorageRoot = ServiceProvider.getInstance().getStartupProperties().getProperty("storage.root"); + if(localStorageRoot != null) { + SHost localSHost = shostDao.getLocalStorageHost(mhost.getId(), localStorageRoot); + if(localSHost == null) + throw new InternalErrorException("storage.root is configured but not initialized"); + + S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(localSHost); + bucketAdapter.createContainer(localSHost.getExportRoot(),(null != overrideName ? 
overrideName : bucketName)); + return new OrderedPair(localSHost, localStorageRoot); + } + + throw new OutOfStorageException("No storage host is available"); + } + + public S3BucketAdapter getStorageHostBucketAdapter(SHost shost) + { + S3BucketAdapter adapter = bucketAdapters.get(shost.getHostType()); + if(adapter == null) + throw new InternalErrorException("Bucket adapter is not installed for host type: " + shost.getHostType()); + + return adapter; + } + + /** + * If acl is set then the cannedAccessPolicy parameter should be null and is ignored. + * The cannedAccessPolicy parameter is for REST Put requests only where a simple set of ACLs can be + * created with a single header value. Note that we do not currently support "anonymous" un-authenticated + * access in our implementation. + * + * @throws IOException + */ + @SuppressWarnings("deprecation") + public OrderedPair allocObjectItem(SBucket bucket, String nameKey, S3MetaDataEntry[] meta, S3AccessControlList acl, String cannedAccessPolicy) + { + SObjectDao objectDao = new SObjectDao(); + SObjectItemDao objectItemDao = new SObjectItemDao(); + SMetaDao metaDao = new SMetaDao(); + SAclDao aclDao = new SAclDao(); + SObjectItem item = null; + int versionSeq = 1; + int versioningStatus = bucket.getVersioningStatus(); + + Session session = PersistContext.getSession(); + + // [A] To write into a bucket the user must have write permission to that bucket + S3PolicyContext context = new S3PolicyContext( PolicyActions.PutObject, bucket.getName()); + context.setKeyName( nameKey ); + context.setEvalParam( ConditionKeys.Acl, cannedAccessPolicy); + + verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE ); // TODO - check this validates plain POSTs + + // [B] If versioning is off them we over write a null object item + SObject object = objectDao.getByNameKey(bucket, nameKey); + if ( object != null ) + { + // -> if versioning is on create new object items + if ( SBucket.VERSIONING_ENABLED == versioningStatus ) 
+ { + session.lock(object, LockMode.UPGRADE); + versionSeq = object.getNextSequence(); + object.setNextSequence(versionSeq + 1); + session.save(object); + + item = new SObjectItem(); + item.setTheObject(object); + object.getItems().add(item); + item.setVersion(String.valueOf(versionSeq)); + Date ts = DateHelper.currentGMTTime(); + item.setCreateTime(ts); + item.setLastAccessTime(ts); + item.setLastModifiedTime(ts); + session.save(item); + } + else + { // -> find an object item with a null version, can be null + // if bucket started out with versioning enabled and was then suspended + item = objectItemDao.getByObjectIdNullVersion( object.getId()); + if (item == null) + { + item = new SObjectItem(); + item.setTheObject(object); + object.getItems().add(item); + Date ts = DateHelper.currentGMTTime(); + item.setCreateTime(ts); + item.setLastAccessTime(ts); + item.setLastModifiedTime(ts); + session.save(item); + } + } + } + else + { // -> there is no object nor an object item + object = new SObject(); + object.setBucket(bucket); + object.setNameKey(nameKey); + object.setNextSequence(2); + object.setCreateTime(DateHelper.currentGMTTime()); + object.setOwnerCanonicalId(UserContext.current().getCanonicalUserId()); + session.save(object); + + item = new SObjectItem(); + item.setTheObject(object); + object.getItems().add(item); + if (SBucket.VERSIONING_ENABLED == versioningStatus) item.setVersion(String.valueOf(versionSeq)); + Date ts = DateHelper.currentGMTTime(); + item.setCreateTime(ts); + item.setLastAccessTime(ts); + item.setLastModifiedTime(ts); + session.save(item); + } + + + // [C] We will use the item DB id as the file name, MD5/contentLength will be stored later + String suffix = null; + int dotPos = nameKey.lastIndexOf('.'); + if (dotPos >= 0) suffix = nameKey.substring(dotPos); + if ( suffix != null ) + item.setStoredPath(String.valueOf(item.getId()) + suffix); + else item.setStoredPath(String.valueOf(item.getId())); + + metaDao.save("SObjectItem", item.getId(), 
meta); + + + // [D] Are we setting an ACL along with the object + // -> the ACL is ALWAYS set on a particular instance of the object (i.e., a version) + if ( null != cannedAccessPolicy ) + { + setCannedAccessControls( cannedAccessPolicy, "SObjectItem", item.getId(), bucket ); + } + else if (null == acl || 0 == acl.size()) + { + // -> this is termed the "private" or default ACL, "Owner gets FULL_CONTROL" + setSingleAcl( "SObjectItem", item.getId(), SAcl.PERMISSION_FULL ); + } + else if (null != acl) { + aclDao.save( "SObjectItem", item.getId(), acl ); + } + + session.update(item); + return new OrderedPair(object, item); + } + + + /** + * Access controls that are specified via the "x-amz-acl:" headers in REST requests. + * Note that canned policies can be set when the object's contents are set + */ + public void setCannedAccessControls( String cannedAccessPolicy, String target, long objectId, SBucket bucket ) + { + // Find the permission and symbol for the principal corresponding to the requested cannedAccessPolicy + Triple permission_permission_symbol_triple = + SAcl.getCannedAccessControls(cannedAccessPolicy, target, bucket.getOwnerCanonicalId()); + if ( null == permission_permission_symbol_triple.getThird() ) + setSingleAcl(target, objectId, permission_permission_symbol_triple.getFirst()); + else + { setDefaultAcls( target, + objectId, + permission_permission_symbol_triple.getFirst(), // permission according to ownership of object + permission_permission_symbol_triple.getSecond(), // permission according to ownership of bucket + permission_permission_symbol_triple.getThird() ); // "symbol" to indicate principal or otherwise name of owner + + } + } + + + private void setSingleAcl( String target, long targetId, int permission ) + { + SAclDao aclDao = new SAclDao(); + S3AccessControlList defaultAcl = new S3AccessControlList(); + + // -> if an annoymous request, then do not rewrite the ACL + String userId = UserContext.current().getCanonicalUserId(); + if (0 < 
userId.length()) + { + S3Grant defaultGrant = new S3Grant(); + defaultGrant.setGrantee(SAcl.GRANTEE_USER); + defaultGrant.setCanonicalUserID( userId ); + defaultGrant.setPermission( permission ); + defaultAcl.addGrant( defaultGrant ); + aclDao.save( target, targetId, defaultAcl ); + } + } + + + /** + * The Cloud Stack API Access key is used for for the Canonical User Id everywhere (buckets and objects). + * + * @param owner - this can be the Cloud Access Key for a bucket owner or one of the + * following special symbols: + * (a) '*' - any principal authenticated user (i.e., any user with a registered Cloud Access Key) + * (b) 'A' - any anonymous principal (i.e., S3 request without an Authorization header) + */ + private void setDefaultAcls( String target, long objectId, int permission1, int permission2, String owner ) + { + SAclDao aclDao = new SAclDao(); + S3AccessControlList defaultAcl = new S3AccessControlList(); + + // -> object owner + S3Grant defaultGrant = new S3Grant(); + defaultGrant.setGrantee(SAcl.GRANTEE_USER); + defaultGrant.setCanonicalUserID( UserContext.current().getCanonicalUserId()); + defaultGrant.setPermission( permission1 ); + defaultAcl.addGrant( defaultGrant ); + + // -> bucket owner + defaultGrant = new S3Grant(); + defaultGrant.setGrantee(SAcl.GRANTEE_USER); + defaultGrant.setCanonicalUserID( owner ); + defaultGrant.setPermission( permission2 ); + defaultAcl.addGrant( defaultGrant ); + aclDao.save( target, objectId, defaultAcl ); + } + + public static PolicyAccess verifyPolicy( S3PolicyContext context ) + { + S3BucketPolicy policy = null; + + // Ordinarily a REST request will pass in an S3PolicyContext for a given bucket by this stage. The HttpServletRequest object + // should be held in the UserContext ready for extraction of the S3BucketPolicy. + // If there is an error in obtaining the request object or in loading the policy then log the failure and return a S3PolicyContext + // which indicates DEFAULT_DENY. 
Where there is no failure, the policy returned should be specific to the Canonical User ID of the requester. + + try { + // -> in SOAP the HttpServletRequest object is hidden and not passed around + if (null != context) { + context.setHttp( UserContext.current().getHttp()); + policy = loadPolicy( context ); + } + + if ( null != policy ) + return policy.eval(context, UserContext.current().getCanonicalUserId()); + else return PolicyAccess.DEFAULT_DENY; + } + catch( Exception e ) { + logger.error("verifyAccess - loadPolicy failed, bucket: " + context.getBucketName() + " policy ignored", e); + return PolicyAccess.DEFAULT_DENY; + } + } + + /** + * To determine access to a bucket or an object in a bucket evaluate first a define + * bucket policy and then any defined ACLs. + * + * @param context - all data needed for bucket policies + * @param target - used for ACL evaluation, object identifier + * @param targetId - used for ACL evaluation + * @param requestedPermission - ACL type access requested + * + * @throws ParseException, SQLException, ClassNotFoundException, IllegalAccessException, InstantiationException + */ + public static void verifyAccess( S3PolicyContext context, String target, long targetId, int requestedPermission ) + { + switch( verifyPolicy( context ) ) { + case ALLOW: // overrides ACLs (?) + return; + + case DENY: + throw new PermissionDeniedException( "Access Denied - bucket policy DENY result" ); + + case DEFAULT_DENY: + default: + accessAllowed( target, targetId, requestedPermission ); + break; + } + } + + /** + * This method verifies that the accessing client has the requested + * permission on the object/bucket/Acl represented by the tuple: + * + * For cases where an ACL is meant for any authenticated user we place a "*" for the + * Canonical User Id. N.B. - "*" is not a legal Cloud (Bridge) Access key. + * + * For cases where an ACL is meant for any anonymous user (or 'AllUsers') we place a "A" for the + * Canonical User Id. N.B. 
- "A" is not a legal Cloud (Bridge) Access key. + */ + public static void accessAllowed( String target, long targetId, int requestedPermission ) + { + if (SAcl.PERMISSION_PASS == requestedPermission) return; + + SAclDao aclDao = new SAclDao(); + + // If an annoymous request, then canonicalUserId is an empty string + String userId = UserContext.current().getCanonicalUserId(); + if ( 0 == userId.length()) + { + // Is an anonymous principal ACL set for this ? + if (hasPermission( aclDao.listGrants( target, targetId, "A" ), requestedPermission )) return; + } + else + { + if (hasPermission( aclDao.listGrants( target, targetId, userId ), requestedPermission )) return; + // Or alternatively is there is any principal authenticated ACL set for this ? + if (hasPermission( aclDao.listGrants( target, targetId, "*" ), requestedPermission )) return; + } + // No privileges implies that no access is allowed in the case of an anonymous user + throw new PermissionDeniedException( "Access Denied - ACLs do not give user the required permission" ); + } + + /** + * This method assumes that the bucket has been tested to make sure it exists before + * it is called. + * + * @param context + * @return S3BucketPolicy + * @throws SQLException, ClassNotFoundException, IllegalAccessException, InstantiationException, ParseException + */ + public static S3BucketPolicy loadPolicy( S3PolicyContext context ) + throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException, ParseException + { + OrderedPair result = ServiceProvider.getInstance().getBucketPolicy( context.getBucketName()); + S3BucketPolicy policy = result.getFirst(); + if ( null == policy ) + { + // -> do we have to load it from the database (any other value means there is no policy)? 
+ if (-1 == result.getSecond().intValue()) + { + BucketPolicyDao policyDao = new BucketPolicyDao(); + String policyInJson = policyDao.getPolicy( context.getBucketName()); + // -> place in cache that no policy exists in the database + if (null == policyInJson) { + ServiceProvider.getInstance().setBucketPolicy(context.getBucketName(), null); + return null; + } + + PolicyParser parser = new PolicyParser(); + policy = parser.parse( policyInJson, context.getBucketName()); + if (null != policy) + ServiceProvider.getInstance().setBucketPolicy(context.getBucketName(), policy); + } + } + return policy; + } + + public static void verifyBucketName( String bucketName, boolean useDNSGuidelines ) throws InvalidBucketName + { + // [A] To comply with Amazon S3 basic requirements, bucket names must meet the following conditions + // -> must be between 3 and 255 characters long + int size = bucketName.length(); + if (3 > size || size > 255) + throw new InvalidBucketName( bucketName + " is not between 3 and 255 characters long" ); + + // -> must start with a number or letter + if (!Character.isLetterOrDigit( bucketName.charAt( 0 ))) + throw new InvalidBucketName( bucketName + " does not start with a number or letter" ); + + // -> can contain lowercase letters, numbers, periods (.), underscores (_), and dashes (-) + // -> the bucket name can also contain uppercase letters but it is not recommended + for( int i=0; i < bucketName.length(); i++ ) + { + char next = bucketName.charAt(i); + if (Character.isLetter( next )) continue; + else if (Character.isDigit( next )) continue; + else if ('.' == next) continue; + else if ('_' == next) continue; + else if ('-' == next) continue; + else throw new InvalidBucketName( bucketName + " contains the invalid character: " + next ); + } + + // -> must not be formatted as an IP address (e.g., 192.168.5.4) + String[] parts = bucketName.split( "\\." 
); + if (4 == parts.length) + { + try { + int first = Integer.parseInt( parts[0] ); + int second = Integer.parseInt( parts[1] ); + int third = Integer.parseInt( parts[2] ); + int fourth = Integer.parseInt( parts[3] ); + throw new InvalidBucketName( bucketName + " is formatted as an IP address" ); + } + catch( NumberFormatException e ) + {throw new InvalidBucketName( bucketName);} + } + + + // [B] To conform with DNS requirements, Amazon recommends following these additional guidelines when creating buckets + // -> bucket names should be between 3 and 63 characters long + if (useDNSGuidelines) + { + // -> bucket names should be between 3 and 63 characters long + if (3 > size || size > 63) + throw new InvalidBucketName( "DNS requiremens, bucket name: " + bucketName + " is not between 3 and 63 characters long" ); + + // -> bucket names should not contain underscores (_) + int pos = bucketName.indexOf( '_' ); + if (-1 != pos) + throw new InvalidBucketName( "DNS requiremens, bucket name: " + bucketName + " should not contain underscores" ); + + // -> bucket names should not end with a dash + if (bucketName.endsWith( "-" )) + throw new InvalidBucketName( "DNS requiremens, bucket name: " + bucketName + " should not end with a dash" ); + + // -> bucket names cannot contain two, adjacent periods + pos = bucketName.indexOf( ".." ); + if (-1 != pos) + throw new InvalidBucketName( "DNS requiremens, bucket name: " + bucketName + " should not contain \"..\"" ); + + // -> bucket names cannot contain dashes next to periods (e.g., "my-.bucket.com" and "my.-bucket" are invalid) + if (-1 != bucketName.indexOf( "-." 
) || -1 != bucketName.indexOf( ".-" )) + throw new InvalidBucketName( "DNS requiremens, bucket name: " + bucketName + " should not contain \".-\" or \"-.\"" ); + } + } + + private static boolean hasPermission( List privileges, int requestedPermission ) + { + ListIterator it = privileges.listIterator(); + while( it.hasNext()) + { + // True providing the requested permission is contained in one or the granted rights for this user. False otherwise. + SAcl rights = (SAcl)it.next(); + int permission = rights.getPermission(); + if (requestedPermission == (permission & requestedPermission)) return true; + } + return false; + } + + /** + * ifRange is true and ifUnmodifiedSince or IfMatch fails then we return the entire object (indicated by + * returning a -1 as the function result. + * + * @param ifCond - conditional get defined by these tests + * @param lastModified - value used on ifModifiedSince or ifUnmodifiedSince + * @param ETag - value used on ifMatch and ifNoneMatch + * @param ifRange - using an if-Range HTTP functionality + * @return -1 means return the entire object with an HTTP 200 (not a subrange) + */ + private int conditionPassed( S3ConditionalHeaders ifCond, Date lastModified, String ETag, boolean ifRange ) + { + if (null == ifCond) return 200; + + if (0 > ifCond.ifModifiedSince( lastModified )) + return 304; + + if (0 > ifCond.ifUnmodifiedSince( lastModified )) + return (ifRange ? -1 : 412); + + if (0 > ifCond.ifMatchEtag( ETag )) + return (ifRange ? -1 : 412); + + if (0 > ifCond.ifNoneMatchEtag( ETag )) + return 412; + + return 200; + } +} diff --git a/awsapi/src/com/cloud/bridge/service/core/s3/S3Grant.java b/awsapi/src/com/cloud/bridge/service/core/s3/S3Grant.java index e775a2c33dd..683df23c87e 100644 --- a/awsapi/src/com/cloud/bridge/service/core/s3/S3Grant.java +++ b/awsapi/src/com/cloud/bridge/service/core/s3/S3Grant.java @@ -1,84 +1,84 @@ -/* - * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.cloud.bridge.service.core.s3; - -import java.util.List; - -import com.cloud.bridge.model.SAcl; -import com.cloud.bridge.model.SBucket; -import com.cloud.bridge.service.exception.UnsupportedException; - -/** - * @author Kelven Yang, John Zucker - * Each relation holds - * a grantee - which is one of SAcl.GRANTEE_USER, SAcl.GRANTEE_ALLUSERS, SAcl.GRANTEE_AUTHENTICATED - * a permission - which is one of SAcl.PERMISSION_PASS, SAcl.PERMISSION_NONE, SAcl.PERMISSION_READ, - * SAcl.PERMISSION_WRITE, SAcl.PERMISSION_READ_ACL, SAcl.PERMISSION_WRITE_ACL, SAcl.PERMISSION_FULL - * canonicalUserID - */ -public class S3Grant { - private int grantee; // SAcl.GRANTEE_USER etc - private int permission; // SAcl.PERMISSION_READ etc - private String canonicalUserID; - - public S3Grant() { - } - - public int getGrantee() { - return grantee; - } - - public void setGrantee(int grantee) { - this.grantee = grantee; - } - - public int getPermission() { - return permission; - } - - public void setPermission(int permission) { - this.permission = permission; - } - - public String getCanonicalUserID() { - return canonicalUserID; - } - - public void setCanonicalUserID(String canonicalUserID) { - this.canonicalUserID = canonicalUserID; - } - - /* Return an array of S3Grants holding the permissions of grantees by grantee type and their canonicalUserIds. - * Used by S3 engine to get ACL policy requests for buckets and objects. 
- */ - public static S3Grant[] toGrants(List grants) { - if(grants != null) - { - S3Grant[] entries = new S3Grant[grants.size()]; - int i = 0; - for(SAcl acl: grants) { - entries[i] = new S3Grant(); - entries[i].setGrantee(acl.getGranteeType()); - entries[i].setCanonicalUserID(acl.getGranteeCanonicalId()); - entries[i].setPermission(acl.getPermission()); - i++; - } - return entries; - } - return null; - } - -} \ No newline at end of file +/* + * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.cloud.bridge.service.core.s3; + +import java.util.List; + +import com.cloud.bridge.model.SAcl; +import com.cloud.bridge.model.SBucket; +import com.cloud.bridge.service.exception.UnsupportedException; + +/** + * @author Kelven Yang, John Zucker + * Each relation holds + * a grantee - which is one of SAcl.GRANTEE_USER, SAcl.GRANTEE_ALLUSERS, SAcl.GRANTEE_AUTHENTICATED + * a permission - which is one of SAcl.PERMISSION_PASS, SAcl.PERMISSION_NONE, SAcl.PERMISSION_READ, + * SAcl.PERMISSION_WRITE, SAcl.PERMISSION_READ_ACL, SAcl.PERMISSION_WRITE_ACL, SAcl.PERMISSION_FULL + * canonicalUserID + */ +public class S3Grant { + private int grantee; // SAcl.GRANTEE_USER etc + private int permission; // SAcl.PERMISSION_READ etc + private String canonicalUserID; + + public S3Grant() { + } + + public int getGrantee() { + return grantee; + } + + public void setGrantee(int grantee) { + this.grantee = grantee; + } + + public int getPermission() { + return permission; + } + + public void setPermission(int permission) { + this.permission = permission; + } + + public String getCanonicalUserID() { + return canonicalUserID; + } + + public void setCanonicalUserID(String canonicalUserID) { + this.canonicalUserID = canonicalUserID; + } + + /* Return an array of S3Grants holding the permissions of grantees by grantee type and their canonicalUserIds. + * Used by S3 engine to get ACL policy requests for buckets and objects. 
+ */ + public static S3Grant[] toGrants(List grants) { + if(grants != null) + { + S3Grant[] entries = new S3Grant[grants.size()]; + int i = 0; + for(SAcl acl: grants) { + entries[i] = new S3Grant(); + entries[i].setGrantee(acl.getGranteeType()); + entries[i].setCanonicalUserID(acl.getGranteeCanonicalId()); + entries[i].setPermission(acl.getPermission()); + i++; + } + return entries; + } + return null; + } + +} diff --git a/awsapi/src/com/cloud/bridge/service/core/s3/S3ListAllMyBucketsEntry.java b/awsapi/src/com/cloud/bridge/service/core/s3/S3ListAllMyBucketsEntry.java index 81c32eb590b..2899dc587f8 100644 --- a/awsapi/src/com/cloud/bridge/service/core/s3/S3ListAllMyBucketsEntry.java +++ b/awsapi/src/com/cloud/bridge/service/core/s3/S3ListAllMyBucketsEntry.java @@ -1,58 +1,58 @@ -/* - * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.cloud.bridge.service.core.s3; - -import java.util.Calendar; -import java.util.TimeZone; - -/** - * @author Kelven Yang - */ -public class S3ListAllMyBucketsEntry { - private String name; - private Calendar creationDate; - - public S3ListAllMyBucketsEntry() { - } - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public Calendar getCreationDate() { - - // cal.setTimeZone(TimeZone.getTimeZone("Z")); - // java.util.Date d = cal.getTime(); - - // java.util.Date d = creationDate.getTime(); - // com.cloud.bridge.util.ISO8601SimpleDateTimeFormat sdf = new com.cloud.bridge.util.ISO8601SimpleDateTimeFormat(); - // sdf.format(d); - // java.lang.StringBuffer b = com.cloud.bridge.util.ISO8601SimpleDateTimeFormat.format(d); return b; - - return creationDate; - - - - } - - public void setCreationDate(Calendar creationDate) { - this.creationDate = creationDate; - } -} \ No newline at end of file +/* + * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.cloud.bridge.service.core.s3; + +import java.util.Calendar; +import java.util.TimeZone; + +/** + * @author Kelven Yang + */ +public class S3ListAllMyBucketsEntry { + private String name; + private Calendar creationDate; + + public S3ListAllMyBucketsEntry() { + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public Calendar getCreationDate() { + + // cal.setTimeZone(TimeZone.getTimeZone("Z")); + // java.util.Date d = cal.getTime(); + + // java.util.Date d = creationDate.getTime(); + // com.cloud.bridge.util.ISO8601SimpleDateTimeFormat sdf = new com.cloud.bridge.util.ISO8601SimpleDateTimeFormat(); + // sdf.format(d); + // java.lang.StringBuffer b = com.cloud.bridge.util.ISO8601SimpleDateTimeFormat.format(d); return b; + + return creationDate; + + + + } + + public void setCreationDate(Calendar creationDate) { + this.creationDate = creationDate; + } +} diff --git a/awsapi/src/com/cloud/bridge/service/core/s3/S3PutObjectInlineRequest.java b/awsapi/src/com/cloud/bridge/service/core/s3/S3PutObjectInlineRequest.java index 065d58f68f2..b37655cabd7 100644 --- a/awsapi/src/com/cloud/bridge/service/core/s3/S3PutObjectInlineRequest.java +++ b/awsapi/src/com/cloud/bridge/service/core/s3/S3PutObjectInlineRequest.java @@ -1,111 +1,111 @@ -/* - * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.cloud.bridge.service.core.s3; - -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.io.InputStream; - -import javax.activation.DataHandler; - -/** - * @author Kelven Yang, John Zucker - */ -public class S3PutObjectInlineRequest extends S3Request { - protected String bucketName; - protected String key; - protected long contentLength; - protected S3MetaDataEntry[] metaEntries; - protected S3AccessControlList acl; - protected String cannedAccessPolicy; // Canned ACLs are public-read, public-read-write, private, authenticated-read or log-delivery-write - protected DataHandler data; - protected String dataAsString; - - public S3PutObjectInlineRequest() { - super(); - data = null; - } - - public String getBucketName() { - return bucketName; - } - - public void setBucketName(String bucketName) { - this.bucketName = bucketName; - } - - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - public long getContentLength() { - return contentLength; - } - - public void setContentLength(long contentLength) { - this.contentLength = contentLength; - } - - public S3MetaDataEntry[] getMetaEntries() { - return metaEntries; - } - - public void setMetaEntries(S3MetaDataEntry[] metaEntries) { - this.metaEntries = metaEntries; - } - - public S3AccessControlList getAcl() { - return acl; - } - - public void setAcl(S3AccessControlList acl) { - this.acl = acl; - } - - public String getCannedAccess() { - return cannedAccessPolicy; - } - - public void setCannedAccess(String cannedAccessPolicy) { - this.cannedAccessPolicy = cannedAccessPolicy; - } - - public DataHandler getData() { - return data; - } - - public void setData(DataHandler data) { - this.data = data; - } - - public void setDataAsString( String data ) { - this.dataAsString = data; - } - - public InputStream getDataInputStream() throws IOException - { - if ( null == data ) - { - ByteArrayInputStream bs = new ByteArrayInputStream( 
dataAsString.getBytes()); - return bs; - } - else return data.getInputStream(); - } -} \ No newline at end of file +/* + * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.cloud.bridge.service.core.s3; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; + +import javax.activation.DataHandler; + +/** + * @author Kelven Yang, John Zucker + */ +public class S3PutObjectInlineRequest extends S3Request { + protected String bucketName; + protected String key; + protected long contentLength; + protected S3MetaDataEntry[] metaEntries; + protected S3AccessControlList acl; + protected String cannedAccessPolicy; // Canned ACLs are public-read, public-read-write, private, authenticated-read or log-delivery-write + protected DataHandler data; + protected String dataAsString; + + public S3PutObjectInlineRequest() { + super(); + data = null; + } + + public String getBucketName() { + return bucketName; + } + + public void setBucketName(String bucketName) { + this.bucketName = bucketName; + } + + public String getKey() { + return key; + } + + public void setKey(String key) { + this.key = key; + } + + public long getContentLength() { + return contentLength; + } + + public void setContentLength(long contentLength) { + this.contentLength = contentLength; + } + + public S3MetaDataEntry[] getMetaEntries() { + return metaEntries; + } + + public void 
setMetaEntries(S3MetaDataEntry[] metaEntries) { + this.metaEntries = metaEntries; + } + + public S3AccessControlList getAcl() { + return acl; + } + + public void setAcl(S3AccessControlList acl) { + this.acl = acl; + } + + public String getCannedAccess() { + return cannedAccessPolicy; + } + + public void setCannedAccess(String cannedAccessPolicy) { + this.cannedAccessPolicy = cannedAccessPolicy; + } + + public DataHandler getData() { + return data; + } + + public void setData(DataHandler data) { + this.data = data; + } + + public void setDataAsString( String data ) { + this.dataAsString = data; + } + + public InputStream getDataInputStream() throws IOException + { + if ( null == data ) + { + ByteArrayInputStream bs = new ByteArrayInputStream( dataAsString.getBytes()); + return bs; + } + else return data.getInputStream(); + } +} diff --git a/awsapi/src/com/cloud/bridge/service/core/s3/S3PutObjectInlineResponse.java b/awsapi/src/com/cloud/bridge/service/core/s3/S3PutObjectInlineResponse.java index b9241458420..4b4df562664 100644 --- a/awsapi/src/com/cloud/bridge/service/core/s3/S3PutObjectInlineResponse.java +++ b/awsapi/src/com/cloud/bridge/service/core/s3/S3PutObjectInlineResponse.java @@ -1,67 +1,67 @@ -/* - * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.cloud.bridge.service.core.s3; - -import java.util.Calendar; - -/** - * @author Kelven Yang, John Zucker - */ -public class S3PutObjectInlineResponse extends S3Response { - protected String ETag; - protected Calendar lastModified; - protected String version; - protected int uploadId; - - public S3PutObjectInlineResponse() { - super(); - uploadId = -1; - } - - // add ETag header computed as Base64 MD5 whenever object is uploaded or updated - // the Base64 is represented in lowercase - public String getETag() { - return ETag; - } - - public void setETag(String eTag) { - this.ETag = eTag; - } - - public Calendar getLastModified() { - return lastModified; - } - - public void setLastModified(Calendar lastModified) { - this.lastModified = lastModified; - } - - public String getVersion() { - return version; - } - - public void setVersion(String version) { - this.version = version; - } - - public int getUploadId() { - return uploadId; - } - - public void setUploadId(int uploadId) { - this.uploadId = uploadId; - } -} \ No newline at end of file +/* + * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.cloud.bridge.service.core.s3; + +import java.util.Calendar; + +/** + * @author Kelven Yang, John Zucker + */ +public class S3PutObjectInlineResponse extends S3Response { + protected String ETag; + protected Calendar lastModified; + protected String version; + protected int uploadId; + + public S3PutObjectInlineResponse() { + super(); + uploadId = -1; + } + + // add ETag header computed as Base64 MD5 whenever object is uploaded or updated + // the Base64 is represented in lowercase + public String getETag() { + return ETag; + } + + public void setETag(String eTag) { + this.ETag = eTag; + } + + public Calendar getLastModified() { + return lastModified; + } + + public void setLastModified(Calendar lastModified) { + this.lastModified = lastModified; + } + + public String getVersion() { + return version; + } + + public void setVersion(String version) { + this.version = version; + } + + public int getUploadId() { + return uploadId; + } + + public void setUploadId(int uploadId) { + this.uploadId = uploadId; + } +} diff --git a/awsapi/src/com/cloud/bridge/util/DimeDelimitedInputStream.java b/awsapi/src/com/cloud/bridge/util/DimeDelimitedInputStream.java deleted file mode 100644 index 3d8c2023cd7..00000000000 --- a/awsapi/src/com/cloud/bridge/util/DimeDelimitedInputStream.java +++ /dev/null @@ -1,617 +0,0 @@ -/* Took the basic code from Axis 1.2 and modified to fit into the cloud code base */ - -/* - * Copyright 2001-2004 The Apache Software Foundation. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.cloud.bridge.util; - -import java.io.IOException; -import java.io.InputStream; -import java.io.FilterInputStream; -import org.apache.log4j.Logger; - - -/** - * This class takes the input stream and turns it multiple streams. - DIME version 0 format -
- 0                   1                   2                   3
- 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+  ---
- | VERSION |B|E|C| TYPE_T| OPT_T |         OPTIONS_LENGTH        |   A
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- |          ID_LENGTH          |             TYPE_LENGTH         |   Always present 12 bytes
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+   even on chunked data.
- |                          DATA_LENGTH                          |   V
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+  ---
- |                                                               /
- /                       OPTIONS + PADDING                       /
- /                     (absent for version 0)                    |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- |                                                               /
- /                        ID + PADDING                           /
- /                                                               |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- |                                                               /
- /                        TYPE + PADDING                         /
- /                                                               |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- |                                                               /
- /                        DATA + PADDING                         /
- /                                                               |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
- * This implementation of input stream does not support marking operations. - * - * Incoming data is DIME encoded when its MIME type is "application/dime". - * Then use this class to pull out 2 streams: - * (1) The first stream is the SOAP request, - * (2) The second stream is a chunked attachment (e.g., a file to store) - * - * The DIME format is defined at this reference: - * http://msdn.microsoft.com/en-us/library/aa480488.aspx - * - * @author Rick Rineholt - */ -public class DimeDelimitedInputStream extends FilterInputStream -{ - protected final static Logger logger = Logger.getLogger(DimeDelimitedInputStream.class); - - InputStream is = null; //The source input stream. - boolean closed = true; //The stream has been closed. - boolean theEnd = false; //There are no more streams left. - boolean moreChunks = false; //More chunks are a coming! - boolean MB = false; //Message begin flag - boolean ME = false; //Message end flag - String type = null; // - String id = null; // - String tnf = null; //DIME type format - long recordLength = 0L; //length of the current record. - long bytesRead = 0L; //How many bytes of the current record have been read. - int dataPadLength = 0; //How many pad bytes there are. - - protected int streamNo = 0; - protected IOException streamInError = null; - - private static byte[] trash = new byte[4]; - protected static int streamCount = 0; //number of streams produced. - - protected static synchronized int newStreamNo() - { - logger.debug( "streamNo " + (streamCount + 1)); - return ++streamCount; - } - - - /** - * There can be multiple streams in a DIME encoding. For example, the first - * stream can be a SOAP message, and the second stream a binary attachment (e.g., - * a file). During reading after an EOF is returned, this function should be - * called to see if there is another stream following the last. 
- * - * @return the dime delimited stream, null if there are no more streams - * @throws IOException if there was an error loading the data for the next stream - */ - public synchronized DimeDelimitedInputStream getNextStream() throws IOException - { - if (null != streamInError) throw streamInError; - if (theEnd) return null; - - //Each Stream must be read in succession - if (bytesRead < recordLength || moreChunks) - throw new RuntimeException("attach.dimeReadFullyError"); - - dataPadLength -= readPad(dataPadLength); - - //Create an new dime stream that comes after this one. - return new DimeDelimitedInputStream( this.is ); - } - - - /** - * Create a new dime stream. - * - * @param is the InputStream to wrap - * @throws IOException if anything goes wrong - */ - public DimeDelimitedInputStream( InputStream is ) throws IOException - { - super(null); - streamNo = newStreamNo(); - closed = false; - this.is = is; - readHeader( false ); - } - - - /** - * Make sure to skip the pad which appear in several parts of a DIME message. - * @param size - * @return - * @throws IOException - */ - private final int readPad( int size ) throws IOException - { - if (0 == size) return 0; - int read = readFromStream(trash, 0, size); - - if (size != read) - { - streamInError = new IOException("attach.dimeNotPaddedCorrectly"); - throw streamInError; - } - return read; - } - - - private final int readFromStream( byte[] b ) throws IOException - { - return readFromStream( b, 0, b.length ); - } - - private final int readFromStream( byte[] b, int start, int length ) throws IOException - { - int br = 0; - int brTotal = 0; - - if (length == 0) return 0; - - do - { try - { - br = is.read( b, brTotal + start, length - brTotal ); - } - catch (IOException e) - { - streamInError = e; - throw e; - } - if (br > 0) brTotal += br; - } - while( br > -1 && brTotal < length ); - - return br > -1 ? brTotal : br; - } - - - /** - * Get the id for this stream part. 
- * @return the id; - */ - public String getContentId() - { - return id; - } - - public String getDimeTypeNameFormat() - { - return tnf; - } - - /** - * Get the type, as read from the header. - * @return the type of this dime - */ - public String getType() - { - return type; - } - - - /** - * Read from the DIME stream. - * - * @param b is the array to read into. - * @param off is the offset - * @return the number of bytes read. -1 if endof stream - * @throws IOException if data could not be read from the stream - */ - public synchronized int read( byte[] b, int off, int len ) throws IOException - { - if (closed) - { - dataPadLength -= readPad(dataPadLength); - throw new IOException( "streamClosed" ); - } - return _read( b, off, len ); - } - - - protected int _read( byte[] b, int off, int len ) throws IOException - { - int totalbytesread = 0; - int bytes2read = 0; - - if (len < 0) - throw new IllegalArgumentException( "attach.readLengthError" + len ); - - if (off < 0) - throw new IllegalArgumentException( "attach.readOffsetError" + off ); - - if (b == null) - throw new IllegalArgumentException( "attach.readArrayNullError" ); - - if (b.length < off + len) - throw new IllegalArgumentException("attach.readArraySizeError " + b.length + " " + len + " " + off ); - - if (null != streamInError) throw streamInError; - if (0 == len) return 0; //quick. 
- - // odd case no data to read -- give back 0 next time -1; - if (recordLength == 0 && bytesRead == 0 && !moreChunks) - { - ++bytesRead; - if (ME) finalClose(); - return 0; - } - if (bytesRead >= recordLength && !moreChunks) - { - dataPadLength -= readPad( dataPadLength ); - if (ME) finalClose(); - return -1; - } - - - do - { if (bytesRead >= recordLength && moreChunks) readHeader( true ); - bytes2read = (int) Math.min( recordLength - bytesRead, (long)len - totalbytesread ); - - try - { bytes2read = is.read( b, off + totalbytesread, bytes2read ); - } - catch (IOException e) - { - streamInError = e; - throw e; - } - - if (0 < bytes2read) - { - totalbytesread += bytes2read; - bytesRead += bytes2read; - } - } - while( bytes2read > -1 && totalbytesread < len && (bytesRead < recordLength || moreChunks)); - - if ( 0 > bytes2read ) - { - if (moreChunks) - { - streamInError = new IOException("attach.DimeStreamError0"); - throw streamInError; - } - if (bytesRead < recordLength) - { - streamInError = new IOException("attach.DimeStreamError1 " + (recordLength - bytesRead)); - throw streamInError; - } - if (!ME) - { - streamInError = new IOException("attach.DimeStreamError0"); - throw streamInError; - } - //in theory the last chunk of data should also have been padded, but lets be tolerant of that. - dataPadLength = 0; - } - else if (bytesRead >= recordLength) - { - //get rid of pading. - try - { dataPadLength -= readPad( dataPadLength ); - } - catch (IOException e) - { - //in theory the last chunk of data should also have been padded, but lets be tolerant of that. - if (!ME) throw e; - else - { - dataPadLength = 0; - streamInError = null; - } - } - } - - if (bytesRead >= recordLength && ME) finalClose(); - - return totalbytesread >= 0 ? totalbytesread : -1; - } - - - /** - * The DIME header is read into local class data fields and are not - * passed as part of the stream data. 
- * - * @param isChunk - * @throws IOException - */ - protected void readHeader( boolean isChunk ) throws IOException - { - bytesRead = 0; //How many bytes of the record have been read. - - if (isChunk) - { - if (!moreChunks) throw new RuntimeException("attach.DimeStreamError2"); - dataPadLength -= readPad(dataPadLength); //Just in case it was left over. - } - - byte[] header = new byte[12]; - - if (header.length != readFromStream( header) ) - { - streamInError = new IOException("attach.DimeStreamError3 " + header.length ); - throw streamInError; - } - - //VERSION - byte version = (byte) ((header[0] >>> 3) & 0x1f); - if (version > 1) - { - streamInError = new IOException("attach.DimeStreamError4 " + version ); - throw streamInError; - } - - //B, E, C - MB = 0 != (0x4 & header[0]); - ME = 0 != (0x2 & header[0]); - moreChunks = 0 != (0x1 & header[0]); - - //TYPE_T - if (!isChunk) - { - switch( ((header[1] >>> 4) & (byte)0x0f) ) { - case 0x00: tnf = "UNCHANGED"; break; - case 0x01: tnf = "MIME"; break; - case 0x02: tnf = "URI"; break; - default: tnf = "UNKNOWN"; break; - } - } - - //OPTIONS_LENGTH - int optionsLength = ((((int) header[2]) << 8) & 0xff00) | ((int) header[3]); - - //ID_LENGTH - int idLength = ((((int) header[4]) << 8) & 0xff00) | ((int) header[5]); - - //TYPE_LENGTH - int typeLength = ((((int) header[6]) << 8) & 0xff00) | ((int) header[7]); - - //DATA_LENGTH - recordLength = ((((long) header[8] ) << 24) & 0xff000000L) | - ((((long) header[9] ) << 16) & 0xff0000L ) | - ((((long) header[10]) << 8 ) & 0xff00L ) | - ((long) header[11] & 0xffL ); - - //OPTIONS + PADDING - if (0 != optionsLength) - { - byte[] optBytes = new byte[optionsLength]; - - if (optionsLength != readFromStream( optBytes )) - { - streamInError = new IOException("attach.DimeStreamError5 " + optionsLength ); - throw streamInError; - } - optBytes = null; // throw it away, don't know anything about options. 
- - int pad = (int) ((4L - (optionsLength & 0x3L)) & 0x03L); - - if (pad != readFromStream( header, 0, pad )) - { - streamInError = new IOException("attach.DimeStreamError7"); - throw streamInError; - } - } - - // ID + PADDING - if (0 < idLength) - { - byte[] idBytes = new byte[ idLength]; - - if (idLength != readFromStream( idBytes )) - { - streamInError = new IOException("attach.DimeStreamError8"); - throw streamInError; - } - if (idLength != 0 && !isChunk) id = new String(idBytes); - - int pad = (int) ((4L - (idLength & 0x3L)) & 0x03L); - - if (pad != readFromStream( header, 0, pad )) - { - streamInError = new IOException("attach.DimeStreamError9"); - throw streamInError; - } - } - - //TYPE + PADDING - if (0 < typeLength) - { - byte[] typeBytes = new byte[typeLength]; - - if (typeLength != readFromStream( typeBytes )) - { - streamInError = new IOException("attach.DimeStreamError10"); - throw streamInError; - } - if (typeLength != 0 && !isChunk) type = new String(typeBytes); - - int pad = (int) ((4L - (typeLength & 0x3L)) & 0x03L); - - if (pad != readFromStream( header, 0, pad )) - { - streamInError = new IOException("attach.DimeStreamError11"); - throw streamInError; - } - } - logger.debug("MB:" + MB + ", ME:" + ME + ", CF:" + moreChunks + - "Option length:" + optionsLength + - ", ID length:" + idLength + - ", typeLength:" + typeLength + ", TYPE_T:" + tnf); - logger.debug("id:\"" + id + "\""); - logger.debug("type:\"" + type + "\""); - logger.debug("recordlength:\"" + recordLength + "\""); - - dataPadLength = (int) ((4L - (recordLength & 0x3L)) & 0x03L); - } - - - /** - * Read from the delimited stream. - * - * @param b is the array to read into. Read as much as possible - * into the size of this array. - * @return the number of bytes read. 
-1 if endof stream - * @throws IOException if data could not be read from the stream - */ - public int read( byte[] b ) throws IOException - { - return read( b, 0, b.length ); - } - - - // fixme: this seems a bit inefficient - /** - * Read from the boundary delimited stream. - * - * @return the byte read, or -1 if endof stream - * @throws IOException if there was an error reading the data - */ - public int read() throws IOException - { - byte[] b = new byte[1]; - int read = read( b, 0, 1 ); - - if (read < 0) return -1; // fixme: should we also check for read != 1? - return (b[0] & 0xff); // convert byte value to a positive int - } - - - /** - * Closes the stream. - * This will take care of flushing any remaining data to the stream. - * Multiple calls to this method will result in the stream being closed once - * and then all subsequent calls being ignored. - * - * @throws IOException if the stream could not be closed - */ - public void close() throws IOException - { - synchronized( this ) - { - if (closed) return; - closed = true; //mark it closed. - } - logger.debug("bStreamClosed " + streamNo); - - if (bytesRead < recordLength || moreChunks) - { - //We need get this off the stream. Easy way to flush through the stream; - byte[] readrest = new byte[1024 * 16]; - int bread = 0; - - do - { bread = _read( readrest, 0, readrest.length ); //should also close the original stream. - } - while( bread > -1 ); - } - dataPadLength -= readPad( dataPadLength ); - } - - - /** - * Skip n bytes of data in the DIME stream, while reading and processing - * any headers in the current stream. - * - * @param n - number of data bytes to skip - * @return number of bytes actually skipped - * @throws IOException - */ - public long skip( long n ) throws IOException - { - long bytesSkipped = 0; - long bytes2Read = 0; - byte[] dumpbytes = new byte[1024]; - - while( n > 0 ) - { - bytes2Read = (n > 1024 ? 
1024 : n); - bytes2Read = _read( dumpbytes, 0, (int)bytes2Read ); - - n -= bytes2Read; - bytesSkipped += bytes2Read; - } - - return bytesSkipped; - } - - - /** - * Mark the stream. This is not supported. - */ - public void mark( int readlimit ) - { //do nothing - } - - public void reset() throws IOException - { - streamInError = new IOException("attach.bounday.mns"); - throw streamInError; - } - - public boolean markSupported() - { - return false; - } - - - public synchronized int available() throws IOException - { - if (null != streamInError) throw streamInError; - - int chunkAvail = (int) Math.min((long)Integer.MAX_VALUE, recordLength - bytesRead); - int streamAvail = 0; - - try - { streamAvail = is.available(); - } - catch( IOException e ) - { - streamInError = e; - throw e; - } - - if (chunkAvail == 0 && moreChunks && (12 + dataPadLength) <= streamAvail) - { - dataPadLength -= readPad(dataPadLength); - readHeader( true ); - return available(); - } - return Math.min( streamAvail, chunkAvail ); - } - - - protected void finalClose() throws IOException - { - try - { theEnd = true; - if(null != is) is.close(); - } - finally - { - is= null; - } - } -} - diff --git a/awsapi/src/com/cloud/bridge/util/FileRangeDataSource.java b/awsapi/src/com/cloud/bridge/util/FileRangeDataSource.java deleted file mode 100644 index fc1ff10338b..00000000000 --- a/awsapi/src/com/cloud/bridge/util/FileRangeDataSource.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.cloud.bridge.util; - -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; - -import javax.activation.DataSource; - -import org.apache.log4j.Logger; - -/** - * @author Kelven Yang - */ -public class FileRangeDataSource implements DataSource { - protected final static Logger logger = Logger.getLogger(FileRangeDataSource.class); - - private FileRangeInputStream is; - - public FileRangeDataSource(File file, long startPos, long endPos) throws IOException { - is = new FileRangeInputStream(file, startPos, endPos); - } - - @Override - public String getContentType() { - assert(false); - return null; - } - - @Override - public InputStream getInputStream() throws IOException { - return is; - } - - @Override - public String getName() { - assert(false); - return null; - } - - @Override - public OutputStream getOutputStream() throws IOException { - assert(false); - return null; - } -} diff --git a/awsapi/src/com/cloud/bridge/util/FileRangeInputStream.java b/awsapi/src/com/cloud/bridge/util/FileRangeInputStream.java deleted file mode 100644 index b14512b596b..00000000000 --- a/awsapi/src/com/cloud/bridge/util/FileRangeInputStream.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.cloud.bridge.util; - -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.io.RandomAccessFile; - -/** - * @author Kelven Yang - */ -public class FileRangeInputStream extends InputStream { - private RandomAccessFile randomAccessFile; - private long curPos; - private long endPos; - private long fileLength; - - public FileRangeInputStream(File file, long startPos, long endPos) throws IOException { - fileLength = file.length(); - - if(startPos > fileLength) - startPos = fileLength; - - if(endPos > fileLength) - endPos = fileLength; - - if(startPos > endPos) - throw new IllegalArgumentException("Invalid file range " + startPos + "-" + endPos); - - this.curPos = startPos; - this.endPos = endPos; - randomAccessFile = new RandomAccessFile(file, "r"); - randomAccessFile.seek(startPos); - } - - @Override - public int available() throws IOException { - return (int)(endPos - curPos); - } - - @Override - public int read() throws IOException { - if(available() > 0) { - int value = randomAccessFile.read(); - curPos++; - return value; - } - return -1; - } - - @Override - public int read(byte[] b) throws IOException { - return read(b, 0, b.length); - } - - @Override - public int read(byte[] b, int off, int len) throws IOException { - int bytesToRead = Math.min(len, available()); - if(bytesToRead == 0) - return -1; - - int bytesRead = randomAccessFile.read(b, off, bytesToRead); - if(bytesRead < 0) - return -1; - - curPos += bytesRead; - return bytesRead; - } - - @Override - public long skip(long n) throws IOException { - long skipped = Math.min(n, available()); - randomAccessFile.skipBytes((int)skipped); - curPos += skipped; - return skipped; - } - - @Override - public void close() throws IOException { - randomAccessFile.close(); - } -} diff --git a/awsapi/src/com/cloud/bridge/util/MultiPartDimeInputStream.java b/awsapi/src/com/cloud/bridge/util/MultiPartDimeInputStream.java deleted file mode 100644 index 
b34dc0aded4..00000000000 --- a/awsapi/src/com/cloud/bridge/util/MultiPartDimeInputStream.java +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.cloud.bridge.util; - -import org.apache.log4j.Logger; - -import java.io.InputStream; -import java.io.IOException; - -/** - * A DIME stream is actually composed of multiple encoded streams. - * This class is a wrapper around the DimeDelimitedInputStream inorder - * to provide a simple iterator like interface for all the streams in a - * DIME encoded message. - */ -public class MultiPartDimeInputStream -{ - protected final static Logger logger = Logger.getLogger(MultiPartDimeInputStream.class); - - protected InputStream is = null; - protected DimeDelimitedInputStream currentStream = null; - - protected int count = 0; - protected boolean eos = false; - protected String contentId = null; - protected String type = null; - protected String typeFormat = null; - - /** - * The SOAP stream must be first, call nextInputStream to get - * access to the first stream and all streams after that. - * - * @param is the true input stream holding the incoming request. - */ - public MultiPartDimeInputStream( InputStream is ) throws IOException - { - this.is = is; - } - - - /** - * These three methods are DIME specific but provide potentially - * useful information about the current stream's data. 
- * - * @return URL or MIME type - */ - public String getStreamType() - { - return type; - } - - public String getStreamTypeFormat() - { - // Is the type a URI or MIME type or just unknown? - return typeFormat; - } - - public String getStreamId() - { - // The soap body might have string identifiers to point to other streams in the message - return contentId; - } - - public InputStream getInputStream() - { - return currentStream; - } - - public int available() throws IOException - { - if (eos) return -1; - - if (null == currentStream) - { - throw new IOException( "streamClosed -- call nextInputStream()" ); - } - return currentStream.available(); - } - - - /** - * Move on to the next stream encoded in the DIME stream. - * If the current stream has not been all read, then we skip the remaining bytes of - * that stream. - * - * @return false if no next input stream, true if next input stream ready - * @throws IOException - */ - public boolean nextInputStream() throws IOException - { - if ( null == currentStream ) - { - // on the first call to this function get the first stream - if (0 == count) currentStream = new DimeDelimitedInputStream( is ); - } - else - { // make sure the bytes of the previous stream are all skipped before we start the next - currentStream.close(); - contentId = null; - type = null; - typeFormat = null; - currentStream = currentStream.getNextStream(); - } - - if ( null != currentStream ) - { - contentId = currentStream.getContentId(); - type = currentStream.getType(); - typeFormat = currentStream.getDimeTypeNameFormat(); - eos = false; - count++; - return true; - } - else return false; - } - - - public long skip( long n ) throws IOException - { - if (eos || null == currentStream) - { - throw new IOException( "streamClosed -- call nextInputStream()" ); - } - return currentStream.skip( n ); - } - - - public int read( byte[] b, int off, int len ) throws IOException - { - if (eos || null == currentStream) return -1; - - int read = currentStream.read( 
b, off, len ); - - if (read < 0) eos = true; - - return read; - } - - - public int read( byte[] b ) throws IOException - { - return read( b, 0, b.length ); - } - - - public int read() throws IOException - { - if (eos || null == currentStream) return -1; - - int ret = currentStream.read(); - - if (ret < 0) eos = true; - - return ret; - } -} diff --git a/awsapi/src/com/cloud/bridge/util/RestAuth.java b/awsapi/src/com/cloud/bridge/util/RestAuth.java index 9fa88de80d4..9c4eb8c095a 100644 --- a/awsapi/src/com/cloud/bridge/util/RestAuth.java +++ b/awsapi/src/com/cloud/bridge/util/RestAuth.java @@ -1,359 +1,402 @@ -/* - * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.cloud.bridge.util; - -import java.security.SignatureException; -import java.util.*; -import java.io.UnsupportedEncodingException; -import java.net.URLDecoder; - -import javax.crypto.Mac; -import javax.crypto.spec.SecretKeySpec; - -import org.apache.commons.codec.binary.Base64; -import org.apache.log4j.Logger; - - -/** - * This class expects that the caller pulls the required headers from the standard - * HTTPServeletRequest structure. Notes are given below on what values are expected. - * This class is used for the Authentication check for REST requests and Query String - * Authentication requests. 
- */ -public class RestAuth { - protected final static Logger logger = Logger.getLogger(RestAuth.class); - - // TreeMap: used when constructing the CanonicalizedAmzHeaders Element of the StringToSign - protected TreeMap AmazonHeaders = null; // not always present - protected String bucketName = null; // not always present - protected String queryString = null; // only interested in a small set of values - protected String uriPath = null; // only interested in the resource path - protected String date = null; // only if x-amz-date is not set - protected String contentType = null; // not always present - protected String contentMD5 = null; // not always present - protected boolean amzDateSet = false; - protected boolean useSubDomain = false; - - public RestAuth() { - // these must be lexicographically sorted - AmazonHeaders = new TreeMap(); - } - - public RestAuth(boolean useSubDomain) { - // these must be lexicographically sorted - AmazonHeaders = new TreeMap(); - this.useSubDomain = useSubDomain; - } - - public void setUseSubDomain(boolean value) { - useSubDomain = value; - } - - public boolean getUseSubDomain() { - return useSubDomain; - } - - /** - * This header is used iff the "x-amz-date:" header is not defined. - * Value is used in constructing the StringToSign for signature verification. - * - * @param date - the contents of the "Date:" header, skipping the 'Date:' preamble. - * OR pass in the value of the "Expires=" query string parameter passed in - * for "Query String Authentication". - */ - public void setDateHeader( String date ) { - if (this.amzDateSet) return; - if (null != date) date = date.trim(); - this.date = date; - } - - /** - * Value is used in constructing the StringToSign for signature verification. - * - * @param type - the contents of the "Content-Type:" header, skipping the 'Content-Type:' preamble. 
- */ - public void setContentTypeHeader( String type ) { - if (null != type) type = type.trim(); - this.contentType = type; - } - - - /** - * Value is used in constructing the StringToSign for signature verification. - * - * @param type - the contents of the "Content-MD5:" header, skipping the 'Content-MD5:' preamble. - */ - public void setContentMD5Header( String md5 ) { - if (null != md5) md5 = md5.trim(); - this.contentMD5 = md5; - } - - - /** - * The bucket name can be in the "Host:" header but it does not have to be. It can - * instead be in the uriPath as the first step in the path. - * - * Used as part of the CanonalizedResource element of the StringToSign. - * If we get "Host: static.johnsmith.net:8080", then the bucket name is "static.johnsmith.net" - * - * @param header - contents of the "Host:" header, skipping the 'Host:' preamble. - */ - public void setHostHeader( String header ) { - if (null == header) { - this.bucketName = null; - return; - } - - // -> is there a port on the name? - header = header.trim(); - int offset = header.indexOf( ":" ); - if (-1 != offset) header = header.substring( 0, offset ); - this.bucketName = header; - } - - - /** - * Used as part of the CanonalizedResource element of the StringToSign. - * - * @param query - results from calling "HttpServletRequest req.getQueryString()" - */ - public void setQueryString( String query ) { - if (null == query) { - this.queryString = null; - return; - } - - query = new String( "?" + query.trim()); - - // -> only interested in this subset of parameters - if (query.startsWith( "?versioning") || query.startsWith( "?location" ) || - query.startsWith( "?acl" ) || query.startsWith( "?torrent" )) { - - // -> include any value (i.e., with '=') and chop of the rest - int offset = query.indexOf( "&" ); - if ( -1 != offset ) query = query.substring( 0, offset ); - this.queryString = query; - } - } - - - /** - * Used as part of the CanonalizedResource element of the StringToSign. 
- * Append the path part of the un-decoded HTTP Request-URI, up-to but not including the query string. - * - * @param path - - results from calling "HttpServletRequest req.getPathInfo()" - */ - public void addUriPath( String path ) { - if (null != path) path = path.trim(); - this.uriPath = path; - } - - - /** - * Pass in each complete Amazon header found in the HTTP request one at a time. - * Each Amazon header added will become part of the signature calculation. - * We are using a TreeMap here because of the S3 definition: - * "Sort the collection of headers lexicographically by header name." - * - * @param headerAndValue - needs to be the complete amazon header (i.e., starts with "x-amz"). - */ - public void addAmazonHeader( String headerAndValue ) { - if (null == headerAndValue) return; - - String canonicalized = null; - - // [A] First Canonicalize the header and its value - // -> we use the header 'name' as the key since we have to sort on that - int offset = headerAndValue.indexOf( ":" ); - String header = headerAndValue.substring( 0, offset+1 ).toLowerCase(); - String value = headerAndValue.substring( offset+1 ).trim(); - - // -> RFC 2616, Section 4.2: unfold the header's value by replacing linear white space with a single space character - // -> does the HTTPServeletReq already do this for us? - value = value.replaceAll( " ", " " ); // -> multiple spaces to one space - value = value.replaceAll( "(\r\n|\t|\n)", " " ); // -> CRLF, tab, and LF to one space - - - // [B] Does this header already exist? - if ( AmazonHeaders.containsKey( header )) { - // -> combine header fields with the same name into one "header-name:comma-separated-value-list" pair as prescribed by RFC 2616, section 4.2, without any white-space between values. 
- canonicalized = AmazonHeaders.get( header ); - canonicalized = new String( canonicalized + "," + value + "\n" ); - canonicalized = canonicalized.replaceAll( "\n,", "," ); // remove the '\n' from the first stored value - } - else canonicalized = new String( header + value + "\n" ); // -> as per spec, no space between header and its value - - AmazonHeaders.put( header, canonicalized ); - - // [C] "x-amz-date:" takes precedence over the "Date:" header - if (header.equals( "x-amz-date:" )) { - this.amzDateSet = true; - if (null != this.date) this.date = null; - } - } - - - /** - * The request is authenticated if we can regenerate the same signature given - * on the request. Before calling this function make sure to set the header values - * defined by the public values above. - * - * @param httpVerb - the type of HTTP request (e.g., GET, PUT) - * @param secretKey - value obtained from the AWSAccessKeyId - * @param signature - the signature we are trying to recreate, note can be URL-encoded - * - * @throws SignatureException - * - * @return true if request has been authenticated, false otherwise - * @throws UnsupportedEncodingException - */ - public boolean verifySignature( String httpVerb, String secretKey, String signature ) - throws SignatureException, UnsupportedEncodingException { - - if (null == httpVerb || null == secretKey || null == signature) return false; - - httpVerb = httpVerb.trim(); - secretKey = secretKey.trim(); - signature = signature.trim(); - - // -> first calculate the StringToSign after the caller has initialized all the header values - String StringToSign = genStringToSign( httpVerb ); - String calSig = calculateRFC2104HMAC( StringToSign, secretKey ); - - // -> was the passed in signature URL encoded? 
(it must be base64 encoded) - int offset = signature.indexOf( "%" ); - if (-1 != offset) signature = URLDecoder.decode( signature, "UTF-8" ); - - boolean match = signature.equals( calSig ); - if (!match) - logger.error( "Signature mismatch, [" + signature + "] [" + calSig + "] over [" + StringToSign + "]" ); - - return match; - } - - - /** - * This function generates the single string that will be used to sign with a users - * secret key. - * - * StringToSign = HTTP-Verb + "\n" + - * Content-MD5 + "\n" + - * Content-Type + "\n" + - * Date + "\n" + - * CanonicalizedAmzHeaders + - * CanonicalizedResource; - * - * @return The single StringToSign or null. - */ - private String genStringToSign( String httpVerb ) { - StringBuffer canonicalized = new StringBuffer(); - String temp = null; - - canonicalized.append( httpVerb ).append( "\n" ); - if (null != this.contentMD5) - canonicalized.append( this.contentMD5 ); - canonicalized.append( "\n" ); - - if (null != this.contentType) - canonicalized.append( this.contentType ); - canonicalized.append( "\n" ); - - if (null != this.date) - canonicalized.append( this.date ); - - canonicalized.append( "\n" ); - - if (null != (temp = genCanonicalizedAmzHeadersElement())) canonicalized.append( temp ); - if (null != (temp = genCanonicalizedResourceElement() )) canonicalized.append( temp ); - - if ( 0 == canonicalized.length()) - return null; - - return canonicalized.toString(); - } - - - /** - * CanonicalizedResource represents the Amazon S3 resource targeted by the request. - * CanonicalizedResource = [ "/" + Bucket ] + - * + - * [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; - * - * @return A single string representing CanonicalizedResource or null. 
- */ - private String genCanonicalizedResourceElement() { - StringBuffer canonicalized = new StringBuffer(); - - if(this.useSubDomain && this.bucketName != null) - canonicalized.append( "/" ).append( this.bucketName ); - - if (null != this.uriPath ) canonicalized.append( this.uriPath ); - if (null != this.queryString) canonicalized.append( this.queryString ); - - if ( 0 == canonicalized.length()) - return null; - - return canonicalized.toString(); - } - - - /** - * Construct the Canonicalized Amazon headers element of the StringToSign by - * concatenating all headers in the TreeMap into a single string. - * - * @return A single string with all the Amazon headers glued together, or null - * if no Amazon headers appeared in the request. - */ - private String genCanonicalizedAmzHeadersElement() { - Collection headers = AmazonHeaders.values(); - Iterator itr = headers.iterator(); - StringBuffer canonicalized = new StringBuffer(); - - while( itr.hasNext()) - canonicalized.append( itr.next()); - - if ( 0 == canonicalized.length()) - return null; - - return canonicalized.toString(); - } - - - /** - * Create a signature by the following method: - * new String( Base64( SHA1( key, byte array ))) - * - * @param signIt - the data to generate a keyed HMAC over - * @param secretKey - the user's unique key for the HMAC operation - * @return String - the recalculated string - * @throws SignatureException - */ - private String calculateRFC2104HMAC( String signIt, String secretKey ) - throws SignatureException { - - String result = null; - try { - SecretKeySpec key = new SecretKeySpec( secretKey.getBytes(), "HmacSHA1" ); - Mac hmacSha1 = Mac.getInstance( "HmacSHA1" ); - hmacSha1.init( key ); - byte [] rawHmac = hmacSha1.doFinal( signIt.getBytes()); - result = new String( Base64.encodeBase64( rawHmac )); - } catch( Exception e ) { - throw new SignatureException( "Failed to generate keyed HMAC on REST request: " + e.getMessage()); - } - return result.trim(); - } -} +/* + * Copyright 
(C) 2011 Citrix Systems, Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.cloud.bridge.util; + +import java.security.InvalidKeyException; +import java.security.SignatureException; +import java.util.*; +import java.io.UnsupportedEncodingException; +import java.net.URLDecoder; + +import javax.crypto.Mac; +import javax.crypto.spec.SecretKeySpec; + +import org.apache.commons.codec.binary.Base64; +import org.apache.log4j.Logger; + + +/** + * This class expects that the caller pulls the required headers from the standard + * HttpServletRequest structure. This class is responsible for providing the + * RFC2104 calculation to ensure that the signature is valid for the signing string. + * The signing string is a representation of the request. + * Notes are given below on what values are expected. + * This class is used for the Authentication check for REST requests and Query String + * Authentication requests. 
+ * + * @author Kelven Yang, John Zucker, Salvatore Orlando + */ + +public class RestAuth { + protected final static Logger logger = Logger.getLogger(RestAuth.class); + + // TreeMap: used when constructing the CanonicalizedAmzHeaders Element of the StringToSign + protected TreeMap AmazonHeaders = null; // not always present + protected String bucketName = null; // not always present + protected String queryString = null; // for CanonicalizedResource - only interested in a string starting with particular values + protected String uriPath = null; // only interested in the resource path + protected String date = null; // only if x-amz-date is not set + protected String contentType = null; // not always present + protected String contentMD5 = null; // not always present + protected boolean amzDateSet = false; + protected boolean useSubDomain = false; + + protected Set allowedQueryParams; + + public RestAuth() { + // these must be lexicographically sorted + AmazonHeaders = new TreeMap(); + allowedQueryParams = new HashSet() {{ + add("acl"); + add("lifecycle"); + add("location"); + add("logging"); + add("notification"); + add("partNumber"); + add("policy"); + add("requestPayment"); + add("torrent"); + add("uploadId"); + add("uploads"); + add("versionId"); + add("versioning"); + add("versions"); + add("website"); + }}; + } + + public RestAuth(boolean useSubDomain) { + //invoke the other constructor + this(); + this.useSubDomain = useSubDomain; + } + + public void setUseSubDomain(boolean value) { + useSubDomain = value; + } + + public boolean getUseSubDomain() { + return useSubDomain; + } + + /** + * This header is used iff the "x-amz-date:" header is not defined. + * Value is used in constructing the StringToSign for signature verification. + * + * @param date - the contents of the "Date:" header, skipping the 'Date:' preamble. + * OR pass in the value of the "Expires=" query string parameter passed in + * for "Query String Authentication". 
+ */ + public void setDateHeader( String date ) { + if (this.amzDateSet) return; + if (null != date) date = date.trim(); + this.date = date; + } + + /** + * Value is used in constructing the StringToSign for signature verification. + * + * @param type - the contents of the "Content-Type:" header, skipping the 'Content-Type:' preamble. + */ + public void setContentTypeHeader( String type ) { + if (null != type) type = type.trim(); + this.contentType = type; + } + + + /** + * Value is used in constructing the StringToSign for signature verification. + * @param md5 - the contents of the "Content-MD5:" header, skipping the 'Content-MD5:' preamble. + */ + public void setContentMD5Header( String md5 ) { + if (null != md5) md5 = md5.trim(); + this.contentMD5 = md5; + } + + + /** + * The bucket name can be in the "Host:" header but it does not have to be. It can + * instead be in the uriPath as the first step in the path. + * + * Used as part of the CanonicalizedResource element of the StringToSign. + * If we get "Host: static.johnsmith.net:8080", then the bucket name is "static.johnsmith.net" + * + * @param header - contents of the "Host:" header, skipping the 'Host:' preamble. + */ + public void setHostHeader( String header ) { + if (null == header) { + this.bucketName = null; + return; + } + + // -> is there a port on the name? + header = header.trim(); + int offset = header.indexOf( ":" ); + if (-1 != offset) header = header.substring( 0, offset ); + this.bucketName = header; + } + + + /** + * Used as part of the CanonicalizedResource element of the StringToSign. + * CanonicalizedResource = [ "/" + Bucket ] + + * + [sub-resource] + * The list of sub-resources that must be included when constructing the CanonicalizedResource Element are: acl, lifecycle, location, + * logging, notification, partNumber, policy, requestPayment, torrent, uploadId, uploads, versionId, versioning, versions and website. 
+ * (http://docs.amazonwebservices.com/AmazonS3/latest/dev/RESTAuthentication.html) + * @param query - results from calling "HttpServletRequest req.getQueryString()" + */ + public void setQueryString( String query ) { + if (null == query) { + this.queryString = null; + return; + } + + // Sub-resources (i.e.: query params) must be lex sorted + Set subResources = new TreeSet(); + + String [] queryParams = query.split("&"); + StringBuffer builtQuery= new StringBuffer(); + for (String queryParam:queryParams) { + // lookup parameter name + String paramName = queryParam.split("=")[0]; + if (allowedQueryParams.contains(paramName)) { + subResources.add(queryParam); + } + } + for (String subResource:subResources) { + builtQuery.append(subResource + "&"); + } + // If anything inside the string buffer, add a "?" at the beginning, + // and then remove the last '&' + if (builtQuery.length() > 0) { + builtQuery.insert(0, "?"); + builtQuery.deleteCharAt(builtQuery.length()-1); + } + this.queryString = builtQuery.toString(); + } + + + /** + * Used as part of the CanonalizedResource element of the StringToSign. + * Append the path part of the un-decoded HTTP Request-URI, up-to but not including the query string. + * + * @param path - - results from calling "HttpServletRequest req.getPathInfo()" + */ + public void addUriPath( String path ) { + if (null != path) path = path.trim(); + this.uriPath = path; + } + + + /** + * Pass in each complete Amazon header found in the HTTP request one at a time. + * Each Amazon header added will become part of the signature calculation. + * We are using a TreeMap here because of the S3 definition: + * "Sort the collection of headers lexicographically by header name." + * + * @param headerAndValue - needs to be the complete amazon header (i.e., starts with "x-amz"). 
+ */ + public void addAmazonHeader( String headerAndValue ) { + if (null == headerAndValue) return; + + String canonicalized = null; + + // [A] First Canonicalize the header and its value + // -> we use the header 'name' as the key since we have to sort on that + int offset = headerAndValue.indexOf( ":" ); + String header = headerAndValue.substring( 0, offset+1 ).toLowerCase(); + String value = headerAndValue.substring( offset+1 ).trim(); + + // -> RFC 2616, Section 4.2: unfold the header's value by replacing linear white space with a single space character + // -> does the HttpServletRequest already do this for us? + value = value.replaceAll( " ", " " ); // -> multiple spaces to one space + value = value.replaceAll( "(\r\n|\t|\n)", " " ); // -> CRLF, tab, and LF to one space + + + // [B] Does this header already exist? + if ( AmazonHeaders.containsKey( header )) { + // -> combine header fields with the same name into one "header-name:comma-separated-value-list" pair as prescribed by RFC 2616, section 4.2, without any white-space between values. + canonicalized = AmazonHeaders.get( header ); + canonicalized = new String( canonicalized + "," + value + "\n" ); + canonicalized = canonicalized.replaceAll( "\n,", "," ); // remove the '\n' from the first stored value + } + else canonicalized = new String( header + value + "\n" ); // -> as per spec, no space between header and its value + + AmazonHeaders.put( header, canonicalized ); + + // [C] "x-amz-date:" takes precedence over the "Date:" header + if (header.equals( "x-amz-date:" )) { + this.amzDateSet = true; + if (null != this.date) this.date = null; + } + } + + + /** + * The request is authenticated if we can regenerate the same signature given + * on the request. Before calling this function make sure to set the header values + * defined by the public values above. 
+ * + * @param httpVerb - the type of HTTP request (e.g., GET, PUT) + * @param secretKey - value obtained from the AWSAccessKeyId + * @param signature - the signature we are trying to recreate, note can be URL-encoded + * + * @throws SignatureException + * + * @return true if request has been authenticated, false otherwise + * @throws UnsupportedEncodingException + */ + + public boolean verifySignature( String httpVerb, String secretKey, String signature ) + throws SignatureException, UnsupportedEncodingException { + + if (null == httpVerb || null == secretKey || null == signature) return false; + + httpVerb = httpVerb.trim(); + secretKey = secretKey.trim(); + signature = signature.trim(); + + // First calculate the StringToSign after the caller has initialized all the header values + String StringToSign = genStringToSign( httpVerb ); + String calSig = calculateRFC2104HMAC( StringToSign, secretKey ); + // Was the passed in signature URL encoded? (it must be base64 encoded) + int offset = signature.indexOf( "%" ); + if (-1 != offset) signature = URLDecoder.decode( signature, "UTF-8" ); + + boolean match = signature.equals( calSig ); + if (!match) + logger.error( "Signature mismatch, [" + signature + "] [" + calSig + "] over [" + StringToSign + "]" ); + + return match; + } + + + /** + * This function generates the single string that will be used to sign with a users + * secret key. + * + * StringToSign = HTTP-Verb + "\n" + + * Content-MD5 + "\n" + + * Content-Type + "\n" + + * Date + "\n" + + * CanonicalizedAmzHeaders + + * CanonicalizedResource; + * + * @return The single StringToSign or null. 
+ */ + private String genStringToSign( String httpVerb ) { + StringBuffer canonicalized = new StringBuffer(); + String temp = null; + String canonicalizedResourceElement = genCanonicalizedResourceElement(); + canonicalized.append( httpVerb ).append( "\n" ); + if ( (null != this.contentMD5) ) + canonicalized.append( this.contentMD5 ); + canonicalized.append( "\n" ); + + if ( (null != this.contentType) ) + canonicalized.append( this.contentType ); + canonicalized.append( "\n" ); + + if (null != this.date) + canonicalized.append( this.date ); + + canonicalized.append( "\n" ); + + if (null != (temp = genCanonicalizedAmzHeadersElement())) canonicalized.append( temp ); + if (null != canonicalizedResourceElement) canonicalized.append( canonicalizedResourceElement ); + + if ( 0 == canonicalized.length()) + return null; + + return canonicalized.toString(); + } + + + /** + * CanonicalizedResource represents the Amazon S3 resource targeted by the request. + * CanonicalizedResource = [ "/" + Bucket ] + + * + + * [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; + * + * @return A single string representing CanonicalizedResource or null. + */ + private String genCanonicalizedResourceElement() { + StringBuffer canonicalized = new StringBuffer(); + + if(this.useSubDomain && this.bucketName != null) + canonicalized.append( "/" ).append( this.bucketName ); + + if (null != this.uriPath ) canonicalized.append( this.uriPath ); + if (null != this.queryString) canonicalized.append( this.queryString ); + + if ( 0 == canonicalized.length()) + return null; + + return canonicalized.toString(); + } + + + /** + * Construct the Canonicalized Amazon headers element of the StringToSign by + * concatenating all headers in the TreeMap into a single string. + * + * @return A single string with all the Amazon headers glued together, or null + * if no Amazon headers appeared in the request. 
+ */ + private String genCanonicalizedAmzHeadersElement() { + Collection headers = AmazonHeaders.values(); + Iterator itr = headers.iterator(); + StringBuffer canonicalized = new StringBuffer(); + + while( itr.hasNext()) + canonicalized.append( itr.next()); + + if ( 0 == canonicalized.length()) + return null; + + return canonicalized.toString(); + } + + + /** + * Create a signature by the following method: + * new String( Base64( SHA1( key, byte array ))) + * + * @param signIt - the data to generate a keyed HMAC over + * @param secretKey - the user's unique key for the HMAC operation + * @return String - the recalculated string + * @throws SignatureException + */ + private String calculateRFC2104HMAC( String signIt, String secretKey ) + throws SignatureException { + String result = null; + try { + SecretKeySpec key = new SecretKeySpec( secretKey.getBytes(), "HmacSHA1" ); + Mac hmacSha1 = Mac.getInstance( "HmacSHA1" ); + hmacSha1.init( key ); + byte [] rawHmac = hmacSha1.doFinal( signIt.getBytes()); + result = new String( Base64.encodeBase64( rawHmac )); + } + catch( InvalidKeyException e ) { + throw new SignatureException( "Failed to generate keyed HMAC on REST request because key " + secretKey + " is invalid" + e.getMessage()); + } + catch (Exception e) { + throw new SignatureException( "Failed to generate keyed HMAC on REST request: " + e.getMessage()); + } + return result.trim(); + } +} diff --git a/awsapi/src/com/cloud/bridge/util/Tuple.java b/awsapi/src/com/cloud/bridge/util/Tuple.java deleted file mode 100644 index a620b55bbf5..00000000000 --- a/awsapi/src/com/cloud/bridge/util/Tuple.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.cloud.bridge.util; - -/** - * - * @author Kelven Yang - * - * @param - * @param - */ -public class Tuple { - T1 first; - T2 second; - - public Tuple(T1 t1, T2 t2) { - first = t1; - second = t2; - } - - public T1 getFirst() { - return first; - } - - public Tuple setFirst(T1 t1) { - first = t1; - return this; - } - - public T2 getSecond() { - return second; - } - - public Tuple setSecond(T2 t2) { - second = t2; - return this; - } -}