mirror of https://github.com/apache/cloudstack.git
executePutBucketAcl, executePutObjectAcl
parent f05e3dd456
commit 82b59ee1da
@@ -19,10 +19,10 @@
<property name="show_sql">true</property>
<!-- to debug hibernate generated SQL, open following configuration property -->
<!--
<!--
<property name="show_sql">true</property>
<property name="hibernate.format_sql">true</property>
-->
-->

<!-- Mapping files -->
<mapping resource="com/cloud/bridge/model/UserCredentials.hbm.xml"/>
@@ -18,9 +18,21 @@ package com.cloud.bridge.model;
import java.io.Serializable;
import java.util.Date;

import com.cloud.bridge.service.exception.UnsupportedException;
import com.cloud.bridge.util.OrderedPair;
import com.cloud.bridge.util.Triple;

/**
 * @author Kelven Yang, John Zucker
 * A model of stored ACLs to remember the ACL permissions per canonicalUserID and grantee
 * A model of stored ACLs to remember the ACL permissions per canonicalUserID per grantee
 * Hold the AWS S3 grantee and permission constants
 * Implement two forms of getCannedAccessControls mappings
 * (a) an OrderedPair which provides a maplet across
 *     < permission, grantee >
 *     when given an aclRequestString and a target (i.e. bucket or object),
 * (b) a Triplet
 *     < permission1, permission2, symbol >
 *     when given an aclRequestString, a target (i.e. bucket or object) and the ID of the owner.
 */
public class SAcl implements Serializable {
    private static final long serialVersionUID = 7900837117165018850L;
@@ -125,4 +137,100 @@ public class SAcl implements Serializable {
    public void setLastModifiedTime(Date lastModifiedTime) {
        this.lastModifiedTime = lastModifiedTime;
    }

    /** Return an OrderedPair
     * < permission, grantee >
     * comprising
     * a permission - which is one of SAcl.PERMISSION_PASS, SAcl.PERMISSION_NONE, SAcl.PERMISSION_READ,
     * SAcl.PERMISSION_WRITE, SAcl.PERMISSION_READ_ACL, SAcl.PERMISSION_WRITE_ACL, SAcl.PERMISSION_FULL
     * a grantee - which is one of GRANTEE_ALLUSERS, GRANTEE_AUTHENTICATED, GRANTEE_USER
     *
     * Access controls that are specified via the "x-amz-acl:" headers in REST requests for buckets.
     * The ACL request string is treated as a request for a known cannedAccessPolicy
     * @param aclRequestString - The requested ACL from the set of AWS S3 canned ACLs
     * @param target - Either "SBucket" or otherwise assumed to be for a single object item
     */
    public static OrderedPair <Integer,Integer> getCannedAccessControls ( String aclRequestString, String target )
        throws UnsupportedException
    {
        if ( aclRequestString.equalsIgnoreCase( "public-read" ))
            // All users granted READ access.
            return new OrderedPair <Integer,Integer> (PERMISSION_READ,GRANTEE_ALLUSERS);
        else if (aclRequestString.equalsIgnoreCase( "public-read-write" ))
            // All users granted READ and WRITE access
            return new OrderedPair <Integer,Integer> ((PERMISSION_READ | PERMISSION_WRITE),GRANTEE_ALLUSERS);
        else if (aclRequestString.equalsIgnoreCase( "authenticated-read" ))
            // Authenticated users have READ access
            return new OrderedPair <Integer,Integer> (PERMISSION_READ,GRANTEE_AUTHENTICATED);
        else if (aclRequestString.equalsIgnoreCase( "private" ))
            // Only Owner gets FULL_CONTROL
            return new OrderedPair <Integer,Integer> (PERMISSION_FULL,GRANTEE_USER);
        else if (aclRequestString.equalsIgnoreCase( "bucket-owner-read" ))
        {
            // Object Owner gets FULL_CONTROL, Bucket Owner gets READ
            if ( target.equalsIgnoreCase( "SBucket" ))
                 return new OrderedPair <Integer,Integer> (PERMISSION_READ, GRANTEE_USER);
            else
                 return new OrderedPair <Integer,Integer> (PERMISSION_FULL, GRANTEE_USER);
        }
        else if (aclRequestString.equalsIgnoreCase( "bucket-owner-full-control" ))
        {
            // Object Owner gets FULL_CONTROL, Bucket Owner gets FULL_CONTROL
            // This is equivalent to private when used with PUT Bucket
            return new OrderedPair <Integer,Integer> (PERMISSION_FULL,GRANTEE_USER);
        }
        else throw new UnsupportedException( "Unknown Canned Access Policy: " + aclRequestString + " is not supported" );
    }

    /** Return a Triple
     * < permission1, permission2, symbol >
     * comprising
     * two permissions - which is one of SAcl.PERMISSION_PASS, SAcl.PERMISSION_NONE, SAcl.PERMISSION_READ,
     * SAcl.PERMISSION_WRITE, SAcl.PERMISSION_READ_ACL, SAcl.PERMISSION_WRITE_ACL, SAcl.PERMISSION_FULL
     * permission1 applies to objects, permission2 applies to buckets.
     * a symbol to indicate whether the principal is anonymous (i.e. string "A") or authenticated user (i.e.
     * string "*") - otherwise null indicates a single ACL for all users.
     *
     * Access controls that are specified via the "x-amz-acl:" headers in REST requests for buckets.
     * The ACL request string is treated as a request for a known cannedAccessPolicy
     * @param aclRequestString - The requested ACL from the set of AWS S3 canned ACLs
     * @param target - Either "SBucket" or otherwise assumed to be for a single object item
     * @param ownerID - An ID for the owner, if used in place of symbols "A" or "*"
     */
    public static Triple <Integer,Integer,String> getCannedAccessControls ( String aclRequestString, String target, String ownerID )
        throws UnsupportedException
    {
        if ( aclRequestString.equalsIgnoreCase( "public-read" ))
            // Owner gets FULL_CONTROL and the anonymous principal (the 'A' symbol here) is granted READ access.
            return new Triple <Integer, Integer, String> (PERMISSION_FULL, PERMISSION_READ,"A");
        else if (aclRequestString.equalsIgnoreCase( "public-read-write" ))
            // Owner gets FULL_CONTROL and the anonymous principal (the 'A' symbol here) is granted READ and WRITE access
            return new Triple <Integer, Integer, String> (PERMISSION_FULL, (PERMISSION_READ | PERMISSION_WRITE),"A");
        else if (aclRequestString.equalsIgnoreCase( "authenticated-read" ))
            // Owner gets FULL_CONTROL and ANY principal authenticated as a registered S3 user (the '*' symbol here) is granted READ access
            return new Triple <Integer, Integer, String> (PERMISSION_FULL, PERMISSION_READ,"*");
        else if (aclRequestString.equalsIgnoreCase( "private" ))
            // This is termed the "private" or default ACL, "Owner gets FULL_CONTROL"
            return new Triple <Integer, Integer, String> (PERMISSION_FULL, PERMISSION_FULL,null);
        else if (aclRequestString.equalsIgnoreCase( "bucket-owner-read" ))
        {
            // Object Owner gets FULL_CONTROL, Bucket Owner gets READ
            // This is equivalent to private when used with PUT Bucket
            if ( target.equalsIgnoreCase( "SBucket" ))
                 return new Triple <Integer, Integer, String> (PERMISSION_FULL,PERMISSION_FULL ,null);
            else
                 return new Triple <Integer, Integer, String> (PERMISSION_FULL,PERMISSION_READ,ownerID);
        }
        else if (aclRequestString.equalsIgnoreCase( "bucket-owner-full-control" ))
        {
            // Object Owner gets FULL_CONTROL, Bucket Owner gets FULL_CONTROL
            // This is equivalent to private when used with PUT Bucket
            if ( target.equalsIgnoreCase( "SBucket" ))
                 return new Triple <Integer, Integer, String> (PERMISSION_FULL, PERMISSION_FULL, null);
            else
                 return new Triple <Integer, Integer, String> (PERMISSION_FULL,PERMISSION_FULL, ownerID);
        }
        else throw new UnsupportedException( "Unknown Canned Access Policy: " + aclRequestString + " is not supported" );
    }

}
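To make the two new mappings concrete, here is a minimal usage sketch (assuming the SAcl, OrderedPair and Triple classes added in this commit are on the classpath; the bucket/owner strings and printed values are only illustrative):

import com.cloud.bridge.model.SAcl;
import com.cloud.bridge.util.OrderedPair;
import com.cloud.bridge.util.Triple;

public class CannedAclMappingDemo {
    public static void main(String[] args) throws Exception {
        // Pair form: canned ACL string + target type -> < permission, grantee >
        OrderedPair<Integer, Integer> pair = SAcl.getCannedAccessControls("public-read", "SBucket");
        System.out.println("permission=" + pair.getFirst() + ", grantee=" + pair.getSecond());

        // Triple form: also takes the owner ID -> < object permission, bucket permission, symbol or owner >
        Triple<Integer, Integer, String> triple =
            SAcl.getCannedAccessControls("bucket-owner-read", "SObject", "ownerCanonicalId");
        System.out.println(triple.getFirst() + " / " + triple.getSecond() + " / " + triple.getThird());
    }
}

The pair form is what the new REST handlers below consume; the triple form backs the refactored setCannedAccessControls in S3Engine later in this commit.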
@@ -21,7 +21,15 @@ import java.util.HashSet;
import java.util.Set;

/**
 * @author Kelven Yang
 * @author Kelven Yang, John Zucker
 * Holds the relation
 * Id,
 * Name,
 * OwnerCanonicalId,
 * SHost,
 * CreateTime,
 * VersioningStatus
 * For ORM see "com/cloud/bridge/model/SHost.hbm.xml"
 */
public class SBucket implements Serializable {
    private static final long serialVersionUID = 7430267766019671273L;
@@ -19,7 +19,7 @@ import java.io.Serializable;
import java.util.Date;

/**
 * @author Kelven Yang
 * @author Kelven Yang, John Zucker
 */
public class SObjectItem implements Serializable {
    private static final long serialVersionUID = -7351173256185687851L;
@@ -38,6 +38,8 @@ public class EntityDao<T> {
        this.clazz = clazz;

        // Note : beginTransaction can be called multiple times
        // "If a new underlying transaction is required, begin the transaction. Otherwise continue the new work in the
        // context of the existing underlying transaction." from the Hibernate spec
        PersistContext.beginTransaction();
    }
@@ -145,15 +145,15 @@ public class PersistContext {
     * @return
     */
    public static boolean acquireNamedLock(String name, int timeoutSeconds) {
        Connection conn = getJDBCConnection(name, true);
        if(conn == null) {
        Connection jdbcConnection = getJDBCConnection(name, true);
        if(jdbcConnection == null) {
            logger.warn("Unable to acquire named lock connection for named lock: " + name);
            return false;
        }

        PreparedStatement pstmt = null;
        try {
            pstmt = conn.prepareStatement("SELECT COALESCE(GET_LOCK(?, ?),0)");
            pstmt = jdbcConnection.prepareStatement("SELECT COALESCE(GET_LOCK(?, ?),0)");

            pstmt.setString(1, name);
            pstmt.setInt(2, timeoutSeconds);
@@ -185,15 +185,15 @@ public class PersistContext {
    }

    public static boolean releaseNamedLock(String name) {
        Connection conn = getJDBCConnection(name, false);
        if(conn == null) {
        Connection jdbcConnection = getJDBCConnection(name, false);
        if(jdbcConnection == null) {
            logger.error("Unable to acquire DB connection for global lock system");
            return false;
        }

        PreparedStatement pstmt = null;
        try {
            pstmt = conn.prepareStatement("SELECT COALESCE(RELEASE_LOCK(?), 0)");
            pstmt = jdbcConnection.prepareStatement("SELECT COALESCE(RELEASE_LOCK(?), 0)");
            pstmt.setString(1, name);
            ResultSet rs = pstmt.executeQuery();
            if(rs != null && rs.first())
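The conn-to-jdbcConnection rename above does not change behavior; these helpers back the MySQL named-lock pattern used later in this commit around bucket creation. A method-body sketch of a typical caller (the timeout constant here is a placeholder value):

// Sketch only: serialize a critical section on a MySQL named lock.
final int LOCK_ACQUIRING_TIMEOUT_SECONDS = 10;   // hypothetical value
if (PersistContext.acquireNamedLock("bucket.creation", LOCK_ACQUIRING_TIMEOUT_SECONDS)) {
    try {
        // ... critical section: create the bucket row, allocate storage, save ACLs ...
    }
    finally {
        PersistContext.releaseNamedLock("bucket.creation");
    }
}
else {
    // The GET_LOCK call timed out; surface the failure to the caller.
}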
@ -33,7 +33,7 @@ import com.cloud.bridge.util.ConfigurationHelper;
|
|||
public class BucketPolicyDao {
|
||||
public static final Logger logger = Logger.getLogger(BucketPolicyDao.class);
|
||||
|
||||
private Connection conn = null;
|
||||
private Connection jdbcConnection = null;
|
||||
private String dbName = null;
|
||||
private String dbUser = null;
|
||||
private String dbPassword = null;
|
||||
|
|
@ -65,7 +65,7 @@ public class BucketPolicyDao {
|
|||
|
||||
openConnection();
|
||||
try {
|
||||
statement = conn.prepareStatement ( "INSERT INTO bucket_policies (BucketName, OwnerCanonicalID, Policy) VALUES (?,?,?)" );
|
||||
statement = jdbcConnection.prepareStatement ( "INSERT INTO bucket_policies (BucketName, OwnerCanonicalID, Policy) VALUES (?,?,?)" );
|
||||
statement.setString( 1, bucketName );
|
||||
statement.setString( 2, owner );
|
||||
statement.setString( 3, policy );
|
||||
|
|
@ -89,7 +89,7 @@ public class BucketPolicyDao {
|
|||
|
||||
openConnection();
|
||||
try {
|
||||
statement = conn.prepareStatement ( "SELECT OwnerCanonicalID FROM bucket_policies WHERE BucketName=?" );
|
||||
statement = jdbcConnection.prepareStatement ( "SELECT OwnerCanonicalID FROM bucket_policies WHERE BucketName=?" );
|
||||
statement.setString( 1, bucketName );
|
||||
ResultSet rs = statement.executeQuery();
|
||||
if (rs.next()) owner = rs.getString( "OwnerCanonicalID" );
|
||||
|
|
@ -109,7 +109,7 @@ public class BucketPolicyDao {
|
|||
|
||||
openConnection();
|
||||
try {
|
||||
statement = conn.prepareStatement ( "SELECT Policy FROM bucket_policies WHERE BucketName=?" );
|
||||
statement = jdbcConnection.prepareStatement ( "SELECT Policy FROM bucket_policies WHERE BucketName=?" );
|
||||
statement.setString( 1, bucketName );
|
||||
ResultSet rs = statement.executeQuery();
|
||||
if (rs.next()) policy = rs.getString( "Policy" );
|
||||
|
|
@ -128,7 +128,7 @@ public class BucketPolicyDao {
|
|||
|
||||
openConnection();
|
||||
try {
|
||||
statement = conn.prepareStatement ( "DELETE FROM bucket_policies WHERE BucketName=?" );
|
||||
statement = jdbcConnection.prepareStatement ( "DELETE FROM bucket_policies WHERE BucketName=?" );
|
||||
statement.setString( 1, bucketName );
|
||||
int count = statement.executeUpdate();
|
||||
statement.close();
|
||||
|
|
@ -141,14 +141,14 @@ public class BucketPolicyDao {
|
|||
private void openConnection()
|
||||
throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException
|
||||
{
|
||||
if (null == conn) {
|
||||
if (null == jdbcConnection) {
|
||||
Class.forName( "com.mysql.jdbc.Driver" ).newInstance();
|
||||
conn = DriverManager.getConnection( "jdbc:mysql://localhost:3306/"+dbName, dbUser, dbPassword );
|
||||
jdbcConnection = DriverManager.getConnection( "jdbc:mysql://localhost:3306/"+dbName, dbUser, dbPassword );
|
||||
}
|
||||
}
|
||||
|
||||
private void closeConnection() throws SQLException {
|
||||
if (null != conn) conn.close();
|
||||
conn = null;
|
||||
if (null != jdbcConnection) jdbcConnection.close();
|
||||
jdbcConnection = null;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -45,13 +45,18 @@ import com.amazon.s3.ListBucketResponse;
|
|||
import com.cloud.bridge.io.MTOMAwareResultStreamWriter;
|
||||
import com.cloud.bridge.model.SAcl;
|
||||
import com.cloud.bridge.model.SBucket;
|
||||
import com.cloud.bridge.model.SHost;
|
||||
import com.cloud.bridge.persist.PersistContext;
|
||||
import com.cloud.bridge.persist.dao.BucketPolicyDao;
|
||||
import com.cloud.bridge.persist.dao.MultipartLoadDao;
|
||||
import com.cloud.bridge.persist.dao.SAclDao;
|
||||
import com.cloud.bridge.persist.dao.SBucketDao;
|
||||
import com.cloud.bridge.service.S3Constants;
|
||||
import com.cloud.bridge.service.S3RestServlet;
|
||||
import com.cloud.bridge.service.UserContext;
|
||||
import com.cloud.bridge.service.core.s3.S3AccessControlList;
|
||||
import com.cloud.bridge.service.core.s3.S3AccessControlPolicy;
|
||||
import com.cloud.bridge.service.core.s3.S3BucketAdapter;
|
||||
import com.cloud.bridge.service.core.s3.S3BucketPolicy;
|
||||
import com.cloud.bridge.service.core.s3.S3CanonicalUser;
|
||||
import com.cloud.bridge.service.core.s3.S3CreateBucketConfiguration;
|
||||
|
|
@ -60,6 +65,7 @@ import com.cloud.bridge.service.core.s3.S3CreateBucketResponse;
|
|||
import com.cloud.bridge.service.core.s3.S3DeleteBucketRequest;
|
||||
import com.cloud.bridge.service.core.s3.S3Engine;
|
||||
import com.cloud.bridge.service.core.s3.S3GetBucketAccessControlPolicyRequest;
|
||||
import com.cloud.bridge.service.core.s3.S3Grant;
|
||||
import com.cloud.bridge.service.core.s3.S3ListAllMyBucketsRequest;
|
||||
import com.cloud.bridge.service.core.s3.S3ListAllMyBucketsResponse;
|
||||
import com.cloud.bridge.service.core.s3.S3ListBucketObjectEntry;
|
||||
|
|
@ -74,13 +80,18 @@ import com.cloud.bridge.service.core.s3.S3BucketPolicy.PolicyAccess;
|
|||
import com.cloud.bridge.service.core.s3.S3PolicyAction.PolicyActions;
|
||||
import com.cloud.bridge.service.core.s3.S3PolicyCondition.ConditionKeys;
|
||||
import com.cloud.bridge.service.exception.InternalErrorException;
|
||||
import com.cloud.bridge.service.exception.InvalidBucketName;
|
||||
import com.cloud.bridge.service.exception.InvalidRequestContentException;
|
||||
import com.cloud.bridge.service.exception.NetworkIOException;
|
||||
import com.cloud.bridge.service.exception.ObjectAlreadyExistsException;
|
||||
import com.cloud.bridge.service.exception.OutOfServiceException;
|
||||
import com.cloud.bridge.service.exception.PermissionDeniedException;
|
||||
import com.cloud.bridge.util.Converter;
|
||||
import com.cloud.bridge.util.DateHelper;
|
||||
import com.cloud.bridge.util.PolicyParser;
|
||||
import com.cloud.bridge.util.StringHelper;
|
||||
import com.cloud.bridge.util.OrderedPair;
|
||||
import com.cloud.bridge.util.Triple;
|
||||
import com.cloud.bridge.util.XSerializer;
|
||||
import com.cloud.bridge.util.XSerializerXmlAdapter;
|
||||
|
||||
|
|
@ -438,7 +449,7 @@ public class S3BucketAction implements ServletAction {
|
|||
cal.set( 1970, 1, 1 );
|
||||
engineRequest.setAccessKey(UserContext.current().getAccessKey());
|
||||
engineRequest.setRequestTimestamp( cal );
|
||||
engineRequest.setSignature( "" ); // TODO - Provide signature
|
||||
engineRequest.setSignature( "" ); // TODO - Consider providing signature in a future release which allows additional user description
|
||||
engineRequest.setBucketName((String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY));
|
||||
|
||||
S3AccessControlPolicy engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest);
|
||||
|
|
@@ -644,25 +655,41 @@
    }

    public void executePutBucketAcl(HttpServletRequest request, HttpServletResponse response) throws IOException
    {
        S3PutObjectRequest putRequest = null;
    {
        // [A] Determine that there is an applicable bucket which might have an ACL set

        // -> reuse the Access Control List parsing code that was added to support DIME
        String bucketName = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY);
        try {
            putRequest = S3RestServlet.toEnginePutObjectRequest( request.getInputStream());
        }
        catch( Exception e ) {
            throw new IOException( e.toString());
        }

        // -> reuse the SOAP code to save the passed in ACLs
        String bucketName = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY);
        SBucketDao bucketDao = new SBucketDao();
        SBucket bucket = bucketDao.getByName( bucketName );
        String owner = null;
        if ( null != bucket )
             owner = bucket.getOwnerCanonicalId();
        if (null == owner)
        {
            logger.error( "ACL update failed since " + bucketName + " does not exist" );
            throw new IOException("ACL update failed");
        }

        // [B] Obtain the grant request which applies to the acl request string. This latter is supplied as the value of the x-amz-acl header.

        S3SetBucketAccessControlPolicyRequest engineRequest = new S3SetBucketAccessControlPolicyRequest();
        engineRequest.setBucketName( bucketName );
        engineRequest.setAcl( putRequest.getAcl());
        S3Grant grantRequest = new S3Grant();
        S3AccessControlList aclRequest = new S3AccessControlList();

        String aclRequestString = request.getHeader("x-amz-acl");
        OrderedPair <Integer,Integer> accessControlsForBucketOwner = SAcl.getCannedAccessControls(aclRequestString,"SBucket");
        grantRequest.setPermission(accessControlsForBucketOwner.getFirst());
        grantRequest.setGrantee(accessControlsForBucketOwner.getSecond());
        grantRequest.setCanonicalUserID(owner);
        aclRequest.addGrant(grantRequest);
        engineRequest.setAcl(aclRequest);
        engineRequest.setBucketName(bucketName);

        // [C] Allow an S3Engine to handle the S3SetBucketAccessControlPolicyRequest
        S3Response engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest);
        response.setStatus( engineResponse.getResultCode());
    }

    public void executePutBucketVersioning(HttpServletRequest request, HttpServletResponse response) throws IOException
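For reference, the request that reaches executePutBucketAcl above is a REST PUT on the bucket's acl subresource carrying the canned ACL in the x-amz-acl header. A minimal client sketch (the endpoint URL and bucket name are placeholders, and the AWS-style authentication headers a real request needs are omitted):

import java.net.HttpURLConnection;
import java.net.URL;

public class PutBucketAclClientDemo {
    public static void main(String[] args) throws Exception {
        // Placeholder endpoint and bucket; a real request must also be signed.
        URL url = new URL("http://localhost:8080/bridge/rest/mybucket?acl");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("PUT");
        conn.setRequestProperty("x-amz-acl", "public-read");  // canned ACL consumed by executePutBucketAcl
        conn.setDoOutput(true);
        conn.getOutputStream().close();                       // empty body; the header carries the ACL
        System.out.println("HTTP status: " + conn.getResponseCode());
        conn.disconnect();
    }
}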
@ -700,8 +727,8 @@ public class S3BucketAction implements ServletAction {
|
|||
}
|
||||
|
||||
try {
|
||||
// -> does not matter what the ACLs say only the owner can turn on versioning on a bucket
|
||||
// -> the bucket owner may want to restrict the IP address from which this can occur
|
||||
// Irrespective of what the ACLs say only the owner can turn on versioning on a bucket.
|
||||
// The bucket owner may want to restrict the IP address from which this can occur.
|
||||
SBucketDao bucketDao = new SBucketDao();
|
||||
SBucket sbucket = bucketDao.getByName( bucketName );
|
||||
|
||||
|
|
@@ -760,7 +787,7 @@
    }

    /**
     * This is a complex function with all the options defined by Amazon. Part of the functionality is
     * Multipart upload is a complex operation with all the options defined by Amazon. Part of the functionality is
     * provided by the query done against the database. The CommonPrefixes functionality is done the same way
     * as done in the listBucketContents function (i.e., by iterating through the list to decide in which output
     * element each key is placed).
@ -50,6 +50,7 @@ import com.cloud.bridge.persist.dao.SBucketDao;
|
|||
import com.cloud.bridge.service.S3Constants;
|
||||
import com.cloud.bridge.service.S3RestServlet;
|
||||
import com.cloud.bridge.service.UserContext;
|
||||
import com.cloud.bridge.service.core.s3.S3AccessControlList;
|
||||
import com.cloud.bridge.service.core.s3.S3AccessControlPolicy;
|
||||
import com.cloud.bridge.service.core.s3.S3AuthParams;
|
||||
import com.cloud.bridge.service.core.s3.S3ConditionalHeaders;
|
||||
|
|
@ -60,6 +61,7 @@ import com.cloud.bridge.service.core.s3.S3Engine;
|
|||
import com.cloud.bridge.service.core.s3.S3GetObjectAccessControlPolicyRequest;
|
||||
import com.cloud.bridge.service.core.s3.S3GetObjectRequest;
|
||||
import com.cloud.bridge.service.core.s3.S3GetObjectResponse;
|
||||
import com.cloud.bridge.service.core.s3.S3Grant;
|
||||
import com.cloud.bridge.service.core.s3.S3MetaDataEntry;
|
||||
import com.cloud.bridge.service.core.s3.S3MultipartPart;
|
||||
import com.cloud.bridge.service.core.s3.S3PolicyContext;
|
||||
|
|
@ -67,6 +69,7 @@ import com.cloud.bridge.service.core.s3.S3PutObjectInlineRequest;
|
|||
import com.cloud.bridge.service.core.s3.S3PutObjectInlineResponse;
|
||||
import com.cloud.bridge.service.core.s3.S3PutObjectRequest;
|
||||
import com.cloud.bridge.service.core.s3.S3Response;
|
||||
import com.cloud.bridge.service.core.s3.S3SetBucketAccessControlPolicyRequest;
|
||||
import com.cloud.bridge.service.core.s3.S3SetObjectAccessControlPolicyRequest;
|
||||
import com.cloud.bridge.service.core.s3.S3PolicyAction.PolicyActions;
|
||||
import com.cloud.bridge.service.exception.PermissionDeniedException;
|
||||
|
|
@@ -259,41 +262,49 @@ public class S3ObjectAction implements ServletAction {
    }

    private void executePutObjectAcl(HttpServletRequest request, HttpServletResponse response) throws IOException
    {
        S3PutObjectRequest putRequest = null;

        String ACLsetting = request.getHeader("x-amz-acl");
    {
        // [A] Determine that there is an applicable bucket which might have an ACL set

        String bucketName = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY);
        String key = (String)request.getAttribute(S3Constants.OBJECT_ATTR_KEY);

        if ( null == ACLsetting )
        // -> reuse the Access Control List parsing code that was added to support DIME
        try {
            putRequest = S3RestServlet.toEnginePutObjectRequest( request.getInputStream());
        }
        catch( Exception e ) {
            throw new IOException( e.toString());
        }

        // -> reuse the SOAP code to save the passed in ACLs
        S3SetObjectAccessControlPolicyRequest engineRequest = new S3SetObjectAccessControlPolicyRequest();
        engineRequest.setBucketName( bucketName );
        engineRequest.setKey( key );
        // if (null == putRequest)
        //      engineRequest.setAcl (an S3AccessContolList) // (ACLsetting);
        // else
        engineRequest.setAcl( putRequest.getAcl());

        // -> is this a request for a specific version of the object? look for "versionId=" in the query string
        String queryString = request.getQueryString();
        if (null != queryString) engineRequest.setVersion( returnParameter( queryString, "versionId=" ));

        S3Response engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest);
        String version = engineResponse.getVersion();
        if (null != version) response.addHeader( "x-amz-version-id", version );
        response.setStatus( engineResponse.getResultCode());
        SBucketDao bucketDao = new SBucketDao();
        SBucket bucket = bucketDao.getByName( bucketName );
        String owner = null;
        if ( null != bucket )
             owner = bucket.getOwnerCanonicalId();
        if (null == owner)
        {
            logger.error( "ACL update failed since " + bucketName + " does not exist" );
            throw new IOException("ACL update failed");
        }
        if (null == key)
        {
            logger.error( "ACL update failed since " + bucketName + " does not contain the expected key" );
            throw new IOException("ACL update failed");
        }

        // [B] Obtain the grant request which applies to the acl request string. This latter is supplied as the value of the x-amz-acl header.

        S3SetObjectAccessControlPolicyRequest engineRequest = new S3SetObjectAccessControlPolicyRequest();
        S3Grant grantRequest = new S3Grant();
        S3AccessControlList aclRequest = new S3AccessControlList();

        String aclRequestString = request.getHeader("x-amz-acl");
        OrderedPair <Integer,Integer> accessControlsForObjectOwner = SAcl.getCannedAccessControls(aclRequestString,"SObject");
        grantRequest.setPermission(accessControlsForObjectOwner.getFirst());
        grantRequest.setGrantee(accessControlsForObjectOwner.getSecond());
        grantRequest.setCanonicalUserID(owner);
        aclRequest.addGrant(grantRequest);
        engineRequest.setAcl(aclRequest);
        engineRequest.setBucketName(bucketName);
        engineRequest.setKey(key);

        // [C] Allow an S3Engine to handle the S3SetObjectAccessControlPolicyRequest
        S3Response engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest);
        response.setStatus( engineResponse.getResultCode());
    }

    private void executeGetObject(HttpServletRequest request, HttpServletResponse response) throws IOException
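The object handler follows the same recipe as the bucket handler: look up the owner, translate the canned ACL with the pair form of getCannedAccessControls, and hand the engine a single-grant list. Condensed as a method-body sketch (the names mirror the handler above; the ACL string and owner ID are illustrative placeholders):

// Sketch: build the single-grant ACL that executePutObjectAcl passes to the S3 engine.
String aclRequestString = "private";                                // normally read from the x-amz-acl header
OrderedPair<Integer, Integer> controls = SAcl.getCannedAccessControls(aclRequestString, "SObject");

S3Grant grantRequest = new S3Grant();
grantRequest.setPermission(controls.getFirst());
grantRequest.setGrantee(controls.getSecond());
grantRequest.setCanonicalUserID("ownerCanonicalId");                // placeholder for the bucket owner's ID

S3AccessControlList aclRequest = new S3AccessControlList();
aclRequest.addGrant(grantRequest);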
@ -354,7 +365,6 @@ public class S3ObjectAction implements ServletAction {
|
|||
S3RestServlet.writeResponse(response, "HTTP/1.1 100 Continue\r\n");
|
||||
}
|
||||
|
||||
// String contentType = request.getHeader( "Content-Type" ); TODO - Needed?
|
||||
long contentLength = Converter.toLong(request.getHeader("Content-Length"), 0);
|
||||
|
||||
String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY);
|
||||
|
|
@ -641,8 +651,6 @@ public class S3ObjectAction implements ServletAction {
|
|||
|
||||
long contentLength = Converter.toLong(request.getHeader("Content-Length"), 0);
|
||||
|
||||
// String md5 = request.getHeader( "Content-MD5" ); TODO _ Needed?
|
||||
|
||||
String temp = request.getParameter("uploadId");
|
||||
if (null != temp) uploadId = Integer.parseInt( temp );
|
||||
|
||||
|
|
|
|||
|
|
@ -206,7 +206,6 @@ public class S3SerializableServiceImplementation implements AmazonS3SkeletonInte
|
|||
request.setBucketName(setBucketAccessControlPolicy.getBucket());
|
||||
request.setAcl(toEngineAccessControlList(setBucketAccessControlPolicy.getAccessControlList()));
|
||||
|
||||
// S3Response basicResponse = engine.handleRequest(request); TODO - Needed?
|
||||
SetBucketAccessControlPolicyResponse response = new SetBucketAccessControlPolicyResponse();
|
||||
return response;
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -116,13 +116,13 @@ public class ServiceProvider {
    }

    /**
     * We return a tuple to distinguish between two cases:
     * We return a 2-tuple to distinguish between two cases:
     * (1) there is no entry in the map for bucketName, and (2) there is a null entry
     * in the map for bucketName. In case 2, the database was inspected for the
     * bucket policy but it had none so we cache it here to reduce database lookups.
     * @param bucketName
     * @return Integer in the tuple means: -1 if no policy defined for the bucket, 0 if one defined
     * even if its set at null.
     * even if it is set at null.
     */
    public OrderedPair<S3BucketPolicy,Integer> getBucketPolicy(String bucketName) {
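A short sketch of how a caller might act on the 2-tuple (the bucket name is a placeholder; the -1/0 convention is the one documented above):

// Sketch: distinguish "no policy defined" (-1) from "a policy entry exists" (0).
OrderedPair<S3BucketPolicy, Integer> lookup =
    ServiceProvider.getInstance().getBucketPolicy("mybucket");
if (lookup.getSecond() == -1) {
    // No policy is defined for this bucket.
}
else {
    // Result code 0: an entry exists, though the cached policy object itself may still be null.
    S3BucketPolicy policy = lookup.getFirst();
}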
@ -19,7 +19,7 @@ import java.util.ArrayList;
|
|||
import java.util.List;
|
||||
|
||||
/**
|
||||
* @author Kelven Yang
|
||||
* @author Kelven Yang, John Zucker
|
||||
*/
|
||||
public class S3AccessControlList {
|
||||
private List<S3Grant> list = new ArrayList<S3Grant>();
|
||||
|
|
|
|||
|
|
@ -78,9 +78,11 @@ import com.cloud.bridge.util.DateHelper;
|
|||
import com.cloud.bridge.util.PolicyParser;
|
||||
import com.cloud.bridge.util.StringHelper;
|
||||
import com.cloud.bridge.util.OrderedPair;
|
||||
import com.cloud.bridge.util.Triple;
|
||||
|
||||
/**
|
||||
* @author Kelven Yang, John Zucker
|
||||
* The CRUD control actions to be invoked from S3BucketAction or S3ObjectAction.
|
||||
*/
|
||||
public class S3Engine {
|
||||
protected final static Logger logger = Logger.getLogger(S3Engine.class);
|
||||
|
|
@ -93,9 +95,14 @@ public class S3Engine {
|
|||
bucketAdapters.put(SHost.STORAGE_HOST_TYPE_LOCAL, new S3FileSystemBucketAdapter());
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* We treat this simply as first a get and then a put of the object the user wants to copy.
|
||||
* Return a S3CopyObjectResponse which represents an object being copied from source
|
||||
* to destination bucket.
|
||||
* Called from S3ObjectAction when copying an object.
|
||||
* This can be treated as first a GET followed by a PUT of the object the user wants to copy.
|
||||
*/
|
||||
|
||||
public S3CopyObjectResponse handleRequest(S3CopyObjectRequest request)
|
||||
{
|
||||
S3CopyObjectResponse response = new S3CopyObjectResponse();
|
||||
|
|
@ -171,7 +178,7 @@ public class S3Engine {
|
|||
|
||||
if (PersistContext.acquireNamedLock("bucket.creation", LOCK_ACQUIRING_TIMEOUT_SECONDS))
|
||||
{
|
||||
OrderedPair<SHost, String> shostTuple = null;
|
||||
OrderedPair<SHost, String> shost_storagelocation_pair = null;
|
||||
boolean success = false;
|
||||
try {
|
||||
SBucketDao bucketDao = new SBucketDao();
|
||||
|
|
@ -180,14 +187,14 @@ public class S3Engine {
|
|||
if (bucketDao.getByName(request.getBucketName()) != null)
|
||||
throw new ObjectAlreadyExistsException("Bucket already exists");
|
||||
|
||||
shostTuple = allocBucketStorageHost(request.getBucketName(), null);
|
||||
shost_storagelocation_pair = allocBucketStorageHost(request.getBucketName(), null);
|
||||
|
||||
SBucket sbucket = new SBucket();
|
||||
sbucket.setName(request.getBucketName());
|
||||
sbucket.setCreateTime(DateHelper.currentGMTTime());
|
||||
sbucket.setOwnerCanonicalId( UserContext.current().getCanonicalUserId());
|
||||
sbucket.setShost(shostTuple.getFirst());
|
||||
shostTuple.getFirst().getBuckets().add(sbucket);
|
||||
sbucket.setShost(shost_storagelocation_pair.getFirst());
|
||||
shost_storagelocation_pair.getFirst().getBuckets().add(sbucket);
|
||||
bucketDao.save(sbucket);
|
||||
|
||||
S3AccessControlList acl = request.getAcl();
|
||||
|
|
@ -204,9 +211,9 @@ public class S3Engine {
|
|||
}
|
||||
finally
|
||||
{
|
||||
if(!success && shostTuple != null) {
|
||||
S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(shostTuple.getFirst());
|
||||
bucketAdapter.deleteContainer(shostTuple.getSecond(), request.getBucketName());
|
||||
if(!success && shost_storagelocation_pair != null) {
|
||||
S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(shost_storagelocation_pair.getFirst());
|
||||
bucketAdapter.deleteContainer(shost_storagelocation_pair.getSecond(), request.getBucketName());
|
||||
}
|
||||
PersistContext.releaseNamedLock("bucket.creation");
|
||||
}
|
||||
|
|
@ -218,6 +225,11 @@ public class S3Engine {
|
|||
return response;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return a S3Response which represents the effect of a bucket being deleted.
* Called from S3BucketAction when deleting a bucket.
|
||||
*/
|
||||
|
||||
public S3Response handleRequest( S3DeleteBucketRequest request )
|
||||
{
|
||||
S3Response response = new S3Response();
|
||||
|
|
@ -228,9 +240,10 @@ public class S3Engine {
|
|||
if ( sbucket != null )
|
||||
{
|
||||
S3PolicyContext context = new S3PolicyContext( PolicyActions.DeleteBucket, bucketName );
|
||||
switch( verifyPolicy( context )) {
|
||||
switch( verifyPolicy( context ))
|
||||
{
|
||||
case ALLOW:
|
||||
// -> bucket policy can give users permission to delete a bucket while ACLs cannot
|
||||
// The bucket policy can give users permission to delete a bucket whereas ACLs cannot
|
||||
break;
|
||||
|
||||
case DENY:
|
||||
|
|
@ -238,7 +251,7 @@ public class S3Engine {
|
|||
|
||||
case DEFAULT_DENY:
|
||||
default:
|
||||
// -> does not matter what the ACLs say only the owner can delete a bucket
|
||||
// Irrespective of what the ACLs say, only the owner can delete a bucket
|
||||
String client = UserContext.current().getCanonicalUserId();
|
||||
if (!client.equals( sbucket.getOwnerCanonicalId())) {
|
||||
throw new PermissionDeniedException( "Access Denied - only the owner can delete a bucket" );
|
||||
|
|
@ -247,14 +260,16 @@ public class S3Engine {
|
|||
}
|
||||
|
||||
|
||||
// -> delete the file
|
||||
// Delete the file from its storage location
|
||||
OrderedPair<SHost, String> host_storagelocation_pair = getBucketStorageHost(sbucket);
|
||||
S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
|
||||
bucketAdapter.deleteContainer(host_storagelocation_pair.getSecond(), request.getBucketName());
|
||||
|
||||
// -> cascade-deleting can delete related SObject/SObjectItem objects, but not SAcl, SMeta and policy objects. We
|
||||
// need to perform deletion of these objects related to bucket manually.
|
||||
// Delete SMeta & SAcl objects: (1)Get all the objects in the bucket, (2)then all the items in each object, (3) then all meta & acl data for each item
|
||||
// Cascade-deleting can delete related SObject/SObjectItem objects, but not SAcl, SMeta and policy objects.
|
||||
// To delete SMeta & SAcl objects:
|
||||
// (1)Get all the objects in the bucket,
|
||||
// (2)then all the items in each object,
|
||||
// (3) then all meta & acl data for each item
|
||||
Set<SObject> objectsInBucket = sbucket.getObjectsInBucket();
|
||||
Iterator<SObject> it = objectsInBucket.iterator();
|
||||
while( it.hasNext())
|
||||
|
|
@ -270,7 +285,7 @@ public class S3Engine {
|
|||
}
|
||||
}
|
||||
|
||||
// -> delete all the policy state associated with the bucket
|
||||
// Delete all the policy state associated with the bucket
|
||||
try {
|
||||
ServiceProvider.getInstance().deleteBucketPolicy( bucketName );
|
||||
BucketPolicyDao policyDao = new BucketPolicyDao();
|
||||
|
|
@ -292,6 +307,11 @@ public class S3Engine {
|
|||
return response;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return a S3ListBucketResponse which represents a list of up to 1000 objects contained in the bucket.
|
||||
* Called from S3BucketAction for GETting objects and for GETting object versions.
|
||||
*/
|
||||
|
||||
public S3ListBucketResponse listBucketContents(S3ListBucketRequest request, boolean includeVersions)
|
||||
{
|
||||
S3ListBucketResponse response = new S3ListBucketResponse();
|
||||
|
|
@ -317,7 +337,7 @@ public class S3Engine {
|
|||
verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_READ );
|
||||
|
||||
|
||||
// when we query, request one more item so that we know how to set isTruncated flag
|
||||
// When executing the query, request one more item so that we know how to set the isTruncated flag
|
||||
SObjectDao sobjectDao = new SObjectDao();
|
||||
List<SObject> l = null;
|
||||
|
||||
|
|
@ -335,25 +355,24 @@ public class S3Engine {
|
|||
response.setNextMarker(l.get(l.size() - 1).getNameKey());
|
||||
}
|
||||
|
||||
// SOAP response does not support versioning
|
||||
// If needed - SOAP response does not support versioning
|
||||
response.setContents( composeListBucketContentEntries(l, prefix, delimiter, maxKeys, includeVersions, request.getVersionIdMarker()));
|
||||
response.setCommonPrefixes( composeListBucketPrefixEntries(l, prefix, delimiter, maxKeys));
|
||||
return response;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return a S3ListAllMyBucketResponse which represents a list of all buckets owned by the requester.
|
||||
* Called from S3BucketAction for GETting all buckets.
|
||||
* To check on bucket policies defined we have to (look for and) evaluate the policy on each
|
||||
* bucket the user owns.
|
||||
*
|
||||
* @param request
|
||||
* @return
|
||||
*/
|
||||
public S3ListAllMyBucketsResponse handleRequest(S3ListAllMyBucketsRequest request)
|
||||
{
|
||||
S3ListAllMyBucketsResponse response = new S3ListAllMyBucketsResponse();
|
||||
SBucketDao bucketDao = new SBucketDao();
|
||||
|
||||
// -> "...you can only list buckets for which you are the owner."
|
||||
// "...you can only list buckets for which you are the owner."
|
||||
List<SBucket> buckets = bucketDao.listBuckets(UserContext.current().getCanonicalUserId());
|
||||
S3CanonicalUser owner = new S3CanonicalUser();
|
||||
owner.setID(UserContext.current().getCanonicalUserId());
|
||||
|
|
@ -380,8 +399,13 @@ public class S3Engine {
|
|||
return response;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return an S3Response representing the result of PUTTING the ACL of a given bucket.
|
||||
* Called from S3BucketAction to PUT its ACL.
|
||||
*/
|
||||
|
||||
public S3Response handleRequest(S3SetBucketAccessControlPolicyRequest request)
|
||||
{
|
||||
{
|
||||
S3Response response = new S3Response();
|
||||
SBucketDao bucketDao = new SBucketDao();
|
||||
String bucketName = request.getBucketName();
|
||||
|
|
@ -391,7 +415,7 @@ public class S3Engine {
|
|||
response.setResultDescription("Bucket does not exist");
|
||||
return response;
|
||||
}
|
||||
|
||||
|
||||
S3PolicyContext context = new S3PolicyContext( PolicyActions.PutBucketAcl, bucketName );
|
||||
verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_WRITE_ACL );
|
||||
|
||||
|
|
@ -403,6 +427,12 @@ public class S3Engine {
|
|||
return response;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Return a S3AccessControlPolicy representing the ACL of a given bucket.
|
||||
* Called from S3BucketAction to GET its ACL.
|
||||
*/
|
||||
|
||||
public S3AccessControlPolicy handleRequest(S3GetBucketAccessControlPolicyRequest request)
|
||||
{
|
||||
S3AccessControlPolicy policy = new S3AccessControlPolicy();
|
||||
|
|
@ -427,13 +457,12 @@ public class S3Engine {
|
|||
}
|
||||
|
||||
/**
|
||||
* This function should be called if a multipart upload is aborted OR has completed successfully and
|
||||
* This method should be called if a multipart upload is aborted OR has completed successfully and
|
||||
* the individual parts have to be cleaned up.
|
||||
*
|
||||
* Called from S3ObjectAction when executing at completion or when aborting multipart upload.
|
||||
* @param bucketName
|
||||
* @param uploadId
|
||||
* @param verifyPermission - if false then don't check the user's permission to clean up the state
|
||||
* @return
|
||||
* @param verifyPermission - If false then do not check the user's permission to clean up the state
|
||||
*/
|
||||
public int freeUploadParts(String bucketName, int uploadId, boolean verifyPermission)
|
||||
{
|
||||
|
|
@ -494,8 +523,7 @@ public class S3Engine {
|
|||
* The initiator must have permission to write to the bucket in question in order to initiate
|
||||
* a multipart upload. Also check to make sure the special folder used to store parts of
|
||||
* a multipart exists for this bucket.
|
||||
*
|
||||
* @param request
|
||||
* Called from S3ObjectAction during many stages of multipart upload.
|
||||
*/
|
||||
public S3PutObjectInlineResponse initiateMultipartUpload(S3PutObjectInlineRequest request)
|
||||
{
|
||||
|
|
@ -535,7 +563,7 @@ public class S3Engine {
|
|||
/**
|
||||
* Save the object fragment in a special (i.e., hidden) directory inside the same mount point as
|
||||
* the bucket location that the final object will be stored in.
|
||||
*
|
||||
* Called from S3ObjectAction during many stages of multipart upload.
|
||||
* @param request
|
||||
* @param uploadId
|
||||
* @param partNumber
|
||||
|
|
@ -595,16 +623,16 @@ public class S3Engine {
|
|||
|
||||
/**
|
||||
* Create the real object represented by all the parts of the multipart upload.
|
||||
*
|
||||
* @param httpResp - servelet response handle to return the headers of the response (including version header)
|
||||
* @param request - normal parameters need to create a new object (e.g., meta data)
|
||||
* @param parts - list of files that make up the multipart
|
||||
* @param os - response outputstream, this function can take a long time and we are required to
|
||||
* keep the connection alive by returning whitespace characters back periodically.
|
||||
* @return
|
||||
* @throws IOException
|
||||
* Called from S3ObjectAction at completion of multipart upload.
|
||||
* @param httpResp - Servlet response handle to return the headers of the response (including version header)
|
||||
* @param request - Normal parameters needed to create a new object (including metadata)
|
||||
* @param parts - List of files that make up the multipart
|
||||
* @param outputStream - Response output stream
|
||||
* N.B. - This method can be long-lasting
|
||||
* We are required to keep the connection alive by returning whitespace characters back periodically.
|
||||
*/
|
||||
public S3PutObjectInlineResponse concatentateMultipartUploads(HttpServletResponse httpResp, S3PutObjectInlineRequest request, S3MultipartPart[] parts, OutputStream os) throws IOException
|
||||
|
||||
public S3PutObjectInlineResponse concatentateMultipartUploads(HttpServletResponse httpResp, S3PutObjectInlineRequest request, S3MultipartPart[] parts, OutputStream outputStream) throws IOException
|
||||
{
|
||||
// [A] Set up and initial error checking
|
||||
S3PutObjectInlineResponse response = new S3PutObjectInlineResponse();
|
||||
|
|
@ -641,7 +669,14 @@ public class S3Engine {
|
|||
// explicit transaction control to avoid holding transaction during long file concatenation process
|
||||
PersistContext.commitTransaction();
|
||||
|
||||
OrderedPair<String, Long> result = bucketAdapter.concatentateObjects( host_storagelocation_pair.getSecond(), bucket.getName(), itemFileName, ServiceProvider.getInstance().getMultipartDir(), parts, os );
|
||||
OrderedPair<String, Long> result = bucketAdapter.
|
||||
concatentateObjects
|
||||
( host_storagelocation_pair.getSecond(),
|
||||
bucket.getName(),
|
||||
itemFileName,
|
||||
ServiceProvider.getInstance().getMultipartDir(),
|
||||
parts,
|
||||
outputStream );
|
||||
response.setETag(result.getFirst());
|
||||
response.setLastModified(DateHelper.toCalendar( object_objectitem_pair.getSecond().getLastModifiedTime()));
|
||||
|
||||
|
|
@ -659,6 +694,11 @@ public class S3Engine {
|
|||
return response;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return a S3PutObjectInlineResponse which represents an object being created into a bucket
|
||||
* Called from S3ObjectAction when PUTting or POSTing an object.
|
||||
*/
|
||||
|
||||
public S3PutObjectInlineResponse handleRequest(S3PutObjectInlineRequest request)
|
||||
{
|
||||
S3PutObjectInlineResponse response = new S3PutObjectInlineResponse();
|
||||
|
|
@ -714,6 +754,11 @@ public class S3Engine {
|
|||
|
||||
return response;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return a S3PutObjectResponse which represents an object being created into a bucket
|
||||
* Called from S3RestServlet when processing a DIME request.
|
||||
*/
|
||||
|
||||
public S3PutObjectResponse handleRequest(S3PutObjectRequest request)
|
||||
{
|
||||
|
|
@ -768,11 +813,10 @@ public class S3Engine {
|
|||
|
||||
/**
|
||||
* The ACL of an object is set at the object version level. By default, PUT sets the ACL of the latest
|
||||
* version of an object. To set the ACL of a different version, use the versionId subresource.
|
||||
*
|
||||
* @param request
|
||||
* @return
|
||||
* version of an object. To set the ACL of a different version, use the versionId subresource.
|
||||
* Called from S3ObjectAction to PUT an object's ACL.
|
||||
*/
|
||||
|
||||
public S3Response handleRequest(S3SetObjectAccessControlPolicyRequest request)
|
||||
{
|
||||
S3PolicyContext context = null;
|
||||
|
|
@ -840,10 +884,9 @@ public class S3Engine {
|
|||
/**
|
||||
* By default, GET returns ACL information about the latest version of an object. To return ACL
|
||||
* information about a different version, use the versionId subresource
|
||||
*
|
||||
* @param request
|
||||
* @return
|
||||
* Called from S3ObjectAction to get an object's ACL.
|
||||
*/
|
||||
|
||||
public S3AccessControlPolicy handleRequest(S3GetObjectAccessControlPolicyRequest request)
|
||||
{
|
||||
S3PolicyContext context = null;
|
||||
|
|
@ -908,11 +951,10 @@ public class S3Engine {
|
|||
}
|
||||
|
||||
/**
|
||||
* Implements both GetObject and GetObjectExtended.
|
||||
*
|
||||
* @param request
|
||||
* @return
|
||||
* Handle requests for GET object and HEAD "get object extended"
|
||||
* Called from S3ObjectAction for GET and HEAD of an object.
|
||||
*/
|
||||
|
||||
public S3GetObjectResponse handleRequest(S3GetObjectRequest request)
|
||||
{
|
||||
S3GetObjectResponse response = new S3GetObjectResponse();
|
||||
|
|
@ -1040,11 +1082,12 @@ public class S3Engine {
|
|||
}
|
||||
|
||||
/**
|
||||
* In one place we handle both versioning and non-versioning delete requests.
|
||||
* Handle object deletion requests, both versioning and non-versioning requirements.
|
||||
* Called from S3ObjectAction for deletion.
|
||||
*/
|
||||
public S3Response handleRequest(S3DeleteObjectRequest request)
|
||||
{
|
||||
// -> verify that the bucket and object exist
|
||||
// Verify that the bucket and object exist
|
||||
S3Response response = new S3Response();
|
||||
SBucketDao bucketDao = new SBucketDao();
|
||||
String bucketName = request.getBucketName();
|
||||
|
|
@ -1065,7 +1108,7 @@ public class S3Engine {
|
|||
}
|
||||
|
||||
|
||||
// -> versioning controls what delete means
|
||||
// Discover whether versioning is enabled. If so versioning requires the setting of a deletion marker.
|
||||
String storedPath = null;
|
||||
SObjectItem item = null;
|
||||
int versioningStatus = sbucket.getVersioningStatus();
|
||||
|
|
@ -1078,12 +1121,12 @@ public class S3Engine {
|
|||
verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_WRITE );
|
||||
|
||||
if (null == wantVersion) {
|
||||
// -> if versioning is on and no versionId is given then we just write a deletion marker
|
||||
// If versioning is on and no versionId is given then we just write a deletion marker
|
||||
sobject.setDeletionMark( UUID.randomUUID().toString());
|
||||
objectDao.update( sobject );
|
||||
}
|
||||
else {
|
||||
// -> are we removing the delete marker?
|
||||
// Otherwise remove the deletion marker if this has been set
|
||||
String deletionMarker = sobject.getDeletionMark();
|
||||
if (null != deletionMarker && wantVersion.equalsIgnoreCase( deletionMarker )) {
|
||||
sobject.setDeletionMark( null );
|
||||
|
|
@ -1092,13 +1135,13 @@ public class S3Engine {
|
|||
return response;
|
||||
}
|
||||
|
||||
// -> if versioning is on and the versionId is given then we delete the object matching that version
|
||||
// If versioning is on and the versionId is given (non-null) then delete the object matching that version
|
||||
if ( null == (item = sobject.getVersion( wantVersion ))) {
|
||||
response.setResultCode(404);
|
||||
return response;
|
||||
}
|
||||
else {
|
||||
// -> just delete the one item that matches the versionId from the database
|
||||
// Providing versionId is non-null, then just delete the one item that matches the versionId from the database
|
||||
storedPath = item.getStoredPath();
|
||||
sobject.deleteItem( item.getId());
|
||||
objectDao.update( sobject );
|
||||
|
|
@ -1106,7 +1149,7 @@ public class S3Engine {
|
|||
}
|
||||
}
|
||||
else
|
||||
{ // -> if versioning is off then we do delete the null object
|
||||
{ // If versioning is off then we do delete the null object
|
||||
S3PolicyContext context = new S3PolicyContext( PolicyActions.DeleteObject, bucketName );
|
||||
context.setKeyName( nameKey );
|
||||
verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_WRITE );
|
||||
|
|
@ -1116,10 +1159,10 @@ public class S3Engine {
|
|||
return response;
|
||||
}
|
||||
else {
|
||||
// -> if no item with a null version then we are done
|
||||
// If there is no item with a null version then we are done
|
||||
if (null == item.getVersion()) {
|
||||
// -> remove the entire object
|
||||
// -> cascade-deleting can delete related SObject/SObjectItem objects, but not SAcl and SMeta objects.
|
||||
// Otherwise remove the entire object
|
||||
// Cascade-deleting can delete related SObject/SObjectItem objects, but not SAcl and SMeta objects.
|
||||
storedPath = item.getStoredPath();
|
||||
deleteMetaData( item.getId());
|
||||
deleteObjectAcls( "SObjectItem", item.getId());
|
||||
|
|
@ -1128,7 +1171,7 @@ public class S3Engine {
|
|||
}
|
||||
}
|
||||
|
||||
// -> delete the file holding the object
|
||||
// Delete the file holding the object
|
||||
if (null != storedPath)
|
||||
{
|
||||
OrderedPair<SHost, String> host_storagelocation_pair = getBucketStorageHost( sbucket );
|
||||
|
|
@ -1233,11 +1276,11 @@ public class S3Engine {
|
|||
{
|
||||
hitIdMarker = (null == versionIdMarker ? true : false);
|
||||
|
||||
// -> this supports the REST call GET /?versions
|
||||
// This supports GET REST calls with /?versions
|
||||
String deletionMarker = sobject.getDeletionMark();
|
||||
if ( null != deletionMarker )
|
||||
{
|
||||
// -> TODO we don't save the timestamp when something is deleted
|
||||
// TODO we should also save the timestamp when something is deleted
|
||||
S3ListBucketObjectEntry entry = new S3ListBucketObjectEntry();
|
||||
entry.setKey(sobject.getNameKey());
|
||||
entry.setVersion( deletionMarker );
|
||||
|
|
@ -1317,7 +1360,7 @@ public class S3Engine {
|
|||
return entry;
|
||||
}
|
||||
|
||||
public OrderedPair<SHost, String> getBucketStorageHost(SBucket bucket)
|
||||
private OrderedPair<SHost, String> getBucketStorageHost(SBucket bucket)
|
||||
{
|
||||
MHostMountDao mountDao = new MHostMountDao();
|
||||
|
||||
|
|
@ -1381,7 +1424,7 @@ public class S3Engine {
|
|||
return new OrderedPair<SHost, String>(mount.getShost(), mount.getMountPath());
|
||||
}
|
||||
|
||||
// To make things simple, only allow one local mounted storage root
|
||||
// To make things simple, only allow one local mounted storage root TODO - Change in the future
|
||||
String localStorageRoot = ServiceProvider.getInstance().getStartupProperties().getProperty("storage.root");
|
||||
if(localStorageRoot != null) {
|
||||
SHost localSHost = shostDao.getLocalStorageHost(mhost.getId(), localStorageRoot);
|
||||
|
|
@ -1528,45 +1571,21 @@ public class S3Engine {
|
|||
* Access controls that are specified via the "x-amz-acl:" headers in REST requests.
|
||||
* Note that canned policies can be set when the object's contents are set
|
||||
*/
|
||||
private void setCannedAccessControls( String cannedAccessPolicy, String target, long objectId, SBucket bucket )
|
||||
public void setCannedAccessControls( String cannedAccessPolicy, String target, long objectId, SBucket bucket )
|
||||
{
|
||||
if ( cannedAccessPolicy.equalsIgnoreCase( "public-read" ))
|
||||
{
|
||||
// -> owner gets FULL_CONTROL and the anonymous principal (the 'A' symbol here) is granted READ access.
|
||||
setDefaultAcls( target, objectId, SAcl.PERMISSION_FULL, SAcl.PERMISSION_READ, "A" );
|
||||
// Find the permission and symbol for the principal corresponding to the requested cannedAccessPolicy
|
||||
Triple<Integer,Integer,String> permission_permission_symbol_triple =
|
||||
SAcl.getCannedAccessControls(cannedAccessPolicy, target, bucket.getOwnerCanonicalId());
|
||||
if ( null == permission_permission_symbol_triple.getThird() )
|
||||
setSingleAcl(target, objectId, permission_permission_symbol_triple.getFirst());
|
||||
else
|
||||
{ setDefaultAcls( target,
|
||||
objectId,
|
||||
permission_permission_symbol_triple.getFirst(), // permission according to ownership of object
|
||||
permission_permission_symbol_triple.getSecond(), // permission according to ownership of bucket
|
||||
permission_permission_symbol_triple.getThird() ); // "symbol" to indicate principal or otherwise name of owner
|
||||
|
||||
}
|
||||
else if (cannedAccessPolicy.equalsIgnoreCase( "public-read-write" ))
|
||||
{
|
||||
// -> owner gets FULL_CONTROL and the anonymous principal (the 'A' symbol here) is granted READ and WRITE access
|
||||
setDefaultAcls( target, objectId, SAcl.PERMISSION_FULL, (SAcl.PERMISSION_READ | SAcl.PERMISSION_WRITE), "A" );
|
||||
}
|
||||
else if (cannedAccessPolicy.equalsIgnoreCase( "authenticated-read" ))
|
||||
{
|
||||
// -> Owner gets FULL_CONTROL and ANY principal authenticated as a registered S3 user (the '*' symbol here) is granted READ access
|
||||
setDefaultAcls( target, objectId, SAcl.PERMISSION_FULL, SAcl.PERMISSION_READ, "*" );
|
||||
}
|
||||
else if (cannedAccessPolicy.equalsIgnoreCase( "private" ))
|
||||
{
|
||||
// -> this is termed the "private" or default ACL, "Owner gets FULL_CONTROL"
|
||||
setSingleAcl( target, objectId, SAcl.PERMISSION_FULL );
|
||||
}
|
||||
else if (cannedAccessPolicy.equalsIgnoreCase( "bucket-owner-read" ))
|
||||
{
|
||||
// -> Object Owner gets FULL_CONTROL, Bucket Owner gets READ
|
||||
// -> is equivalent to private when used with PUT Bucket
|
||||
if ( target.equalsIgnoreCase( "SBucket" ))
|
||||
setSingleAcl( target, objectId, SAcl.PERMISSION_FULL );
|
||||
else setDefaultAcls( target, objectId, SAcl.PERMISSION_FULL, SAcl.PERMISSION_READ, bucket.getOwnerCanonicalId());
|
||||
}
|
||||
else if (cannedAccessPolicy.equalsIgnoreCase( "bucket-owner-full-control" ))
|
||||
{
|
||||
// -> Object Owner gets FULL_CONTROL, Bucket Owner gets FULL_CONTROL
|
||||
// -> is equivalent to private when used with PUT Bucket
|
||||
if ( target.equalsIgnoreCase( "SBucket" ))
|
||||
setSingleAcl( target, objectId, SAcl.PERMISSION_FULL );
|
||||
else setDefaultAcls( target, objectId, SAcl.PERMISSION_FULL, SAcl.PERMISSION_FULL, bucket.getOwnerCanonicalId());
|
||||
}
|
||||
else throw new UnsupportedException( "Unknown Canned Access Policy: " + cannedAccessPolicy + " is not supported" );
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -1587,10 +1606,10 @@ public class S3Engine {
|
|||
aclDao.save( target, targetId, defaultAcl );
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Note that we use the Cloud Stack API Access key for the Canonical User Id everywhere
|
||||
* (i.e., for buckets, and objects).
|
||||
* The Cloud Stack API Access key is used for for the Canonical User Id everywhere (buckets and objects).
|
||||
*
|
||||
* @param owner - this can be the Cloud Access Key for a bucket owner or one of the
|
||||
* following special symbols:
|
||||
|
|
|
|||
|
|
@@ -18,9 +18,16 @@ package com.cloud.bridge.service.core.s3;
import java.util.List;

import com.cloud.bridge.model.SAcl;
import com.cloud.bridge.model.SBucket;
import com.cloud.bridge.service.exception.UnsupportedException;

/**
 * @author Kelven Yang
 * @author Kelven Yang, John Zucker
 * Each relation holds
 * a grantee - which is one of SAcl.GRANTEE_USER, SAcl.GRANTEE_ALLUSERS, SAcl.GRANTEE_AUTHENTICATED
 * a permission - which is one of SAcl.PERMISSION_PASS, SAcl.PERMISSION_NONE, SAcl.PERMISSION_READ,
 * SAcl.PERMISSION_WRITE, SAcl.PERMISSION_READ_ACL, SAcl.PERMISSION_WRITE_ACL, SAcl.PERMISSION_FULL
 * canonicalUserID
 */
public class S3Grant {
    private int grantee; // SAcl.GRANTEE_USER etc

@@ -54,6 +61,9 @@ public class S3Grant {
        this.canonicalUserID = canonicalUserID;
    }

    /* Return an array of S3Grants holding the permissions of grantees by grantee type and their canonicalUserIds.
     * Used by S3 engine to get ACL policy requests for buckets and objects.
     */
    public static S3Grant[] toGrants(List<SAcl> grants) {
        if(grants != null)
        {

@@ -70,4 +80,5 @@
        }
        return null;
    }

}