diff --git a/awsapi/src/com/cloud/bridge/auth/s3/AuthenticationHandler.java b/awsapi/src/com/cloud/bridge/auth/s3/AuthenticationHandler.java index e1eb06347be..fafbdcd3470 100644 --- a/awsapi/src/com/cloud/bridge/auth/s3/AuthenticationHandler.java +++ b/awsapi/src/com/cloud/bridge/auth/s3/AuthenticationHandler.java @@ -33,12 +33,9 @@ import com.cloud.bridge.persist.dao.UserCredentialsDao; import com.cloud.bridge.service.UserContext; import com.cloud.bridge.util.S3SoapAuth; -<<<<<<< HEAD -======= /* * For SOAP compatibility. */ ->>>>>>> 6472e7b... Now really adding the renamed files! public class AuthenticationHandler implements Handler { protected final static Logger logger = Logger.getLogger(AuthenticationHandler.class); diff --git a/awsapi/src/com/cloud/bridge/lifecycle/ServiceEngineLifecycle.java b/awsapi/src/com/cloud/bridge/lifecycle/ServiceEngineLifecycle.java index 889a3e76c81..e34eb15cfa9 100644 --- a/awsapi/src/com/cloud/bridge/lifecycle/ServiceEngineLifecycle.java +++ b/awsapi/src/com/cloud/bridge/lifecycle/ServiceEngineLifecycle.java @@ -19,11 +19,7 @@ import org.apache.axis2.context.ConfigurationContext; import org.apache.axis2.description.AxisService; import org.apache.axis2.engine.ServiceLifeCycle; -<<<<<<< HEAD import com.cloud.bridge.service.ServiceProvider; -======= -import com.cloud.bridge.service.controller.s3.ServiceProvider; ->>>>>>> 6472e7b... Now really adding the renamed files! /** * @author Kelven Yang diff --git a/awsapi/src/com/cloud/bridge/model/SAcl.java b/awsapi/src/com/cloud/bridge/model/SAcl.java index e94fdcfce16..c4106c695f0 100644 --- a/awsapi/src/com/cloud/bridge/model/SAcl.java +++ b/awsapi/src/com/cloud/bridge/model/SAcl.java @@ -1,251 +1,239 @@ -/* - * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.cloud.bridge.model; - -import java.io.Serializable; -import java.util.Date; - -<<<<<<< HEAD -/** - * @author Kelven Yang -======= -import com.cloud.bridge.service.exception.UnsupportedException; -import com.cloud.bridge.util.OrderedPair; -import com.cloud.bridge.util.Triple; - -/** - * @author John Zucker, Kelven Yang - * A model of stored ACLs to remember the ACL permissions per canonicalUserID per grantee - * Hold the AWS S3 grantee and permission constants. - * - * This class implements two forms of getCannedAccessControls mappings, as static methods, - * - * (a) an OrderedPair which provides a maplet across - * < permission, grantee > - * when given an aclRequestString and a target (i.e. bucket or object), - * - * (b) a Triplet - * < permission1, permission2, symbol > - * when given an aclRequestString, a target (i.e. bucket or object) and the ID of the owner. ->>>>>>> 6472e7b... Now really adding the renamed files! - */ -public class SAcl implements Serializable { - private static final long serialVersionUID = 7900837117165018850L; - - public static final int GRANTEE_USER = 0; - public static final int GRANTEE_ALLUSERS = 1; - public static final int GRANTEE_AUTHENTICATED = 2; - -<<<<<<< HEAD - public static final int PERMISSION_PASS = -1; // -> no ACL test required -======= - public static final int PERMISSION_PASS = -1; // No ACL test required ->>>>>>> 6472e7b... Now really adding the renamed files! 
- public static final int PERMISSION_NONE = 0; - public static final int PERMISSION_READ = 1; - public static final int PERMISSION_WRITE = 2; - public static final int PERMISSION_READ_ACL = 4; - public static final int PERMISSION_WRITE_ACL = 8; - public static final int PERMISSION_FULL = (PERMISSION_READ | PERMISSION_WRITE | PERMISSION_READ_ACL | PERMISSION_WRITE_ACL); - - private Long id; - - private String target; - private long targetId; - - private int granteeType; - private String granteeCanonicalId; - - private int permission; - private int grantOrder; - - private Date createTime; - private Date lastModifiedTime; - - public SAcl() { - } - - public Long getId() { - return id; - } - - private void setId(Long id) { - this.id = id; - } - - public String getTarget() { - return target; - } - - public void setTarget(String target) { - this.target = target; - } - - public long getTargetId() { - return targetId; - } - - public void setTargetId(long targetId) { - this.targetId = targetId; - } - - public int getGranteeType() { - return granteeType; - } - - public void setGranteeType(int granteeType) { - this.granteeType = granteeType; - } - - public String getGranteeCanonicalId() { - return granteeCanonicalId; - } - - public void setGranteeCanonicalId(String granteeCanonicalId) { - this.granteeCanonicalId = granteeCanonicalId; - } - - public int getPermission() { - return permission; - } - - public void setPermission(int permission) { - this.permission = permission; - } - - public int getGrantOrder() { - return grantOrder; - } - - public void setGrantOrder(int grantOrder) { - this.grantOrder = grantOrder; - } - - public Date getCreateTime() { - return createTime; - } - - public void setCreateTime(Date createTime) { - this.createTime = createTime; - } - - public Date getLastModifiedTime() { - return lastModifiedTime; - } - - public void setLastModifiedTime(Date lastModifiedTime) { - this.lastModifiedTime = lastModifiedTime; - } -<<<<<<< HEAD -======= - - /** Return an 
OrderedPair - * < permission, grantee > - * comprising - * a permission - which is one of SAcl.PERMISSION_PASS, SAcl.PERMISSION_NONE, SAcl.PERMISSION_READ, - * SAcl.PERMISSION_WRITE, SAcl.PERMISSION_READ_ACL, SAcl.PERMISSION_WRITE_ACL, SAcl.PERMISSION_FULL - * a grantee - which is one of GRANTEE_ALLUSERS, GRANTEE_AUTHENTICATED, GRANTEE_USER - * - * Access controls that are specified via the "x-amz-acl:" headers in REST requests for buckets. - * The ACL request string is treated as a request for a known cannedAccessPolicy - * @param aclRequestString - The requested ACL from the set of AWS S3 canned ACLs - * @param target - Either "SBucket" or otherwise assumed to be for a single object item - */ - public static OrderedPair getCannedAccessControls ( String aclRequestString, String target ) - throws UnsupportedException - { - if ( aclRequestString.equalsIgnoreCase( "public-read" )) - // All users granted READ access. - return new OrderedPair (PERMISSION_READ,GRANTEE_ALLUSERS); - else if (aclRequestString.equalsIgnoreCase( "public-read-write" )) - // All users granted READ and WRITE access - return new OrderedPair ((PERMISSION_READ | PERMISSION_WRITE),GRANTEE_ALLUSERS); - else if (aclRequestString.equalsIgnoreCase( "authenticated-read" )) - // Authenticated users have READ access - return new OrderedPair (PERMISSION_READ,GRANTEE_AUTHENTICATED); - else if (aclRequestString.equalsIgnoreCase( "private" )) - // Only Owner gets FULL_CONTROL - return new OrderedPair (PERMISSION_FULL,GRANTEE_USER); - else if (aclRequestString.equalsIgnoreCase( "bucket-owner-read" )) - { - // Object Owner gets FULL_CONTROL, Bucket Owner gets READ - if ( target.equalsIgnoreCase( "SBucket" )) - return new OrderedPair (PERMISSION_READ, GRANTEE_USER); - else - return new OrderedPair (PERMISSION_FULL, GRANTEE_USER); - } - else if (aclRequestString.equalsIgnoreCase( "bucket-owner-full-control" )) - { - // Object Owner gets FULL_CONTROL, Bucket Owner gets FULL_CONTROL - // This is equivalent to 
private when used with PUT Bucket - return new OrderedPair (PERMISSION_FULL,GRANTEE_USER); - } - else throw new UnsupportedException( "Unknown Canned Access Policy: " + aclRequestString + " is not supported" ); - } - - /** Return a Triple - * < permission1, permission2, symbol > - * comprising - * two permissions - which is one of SAcl.PERMISSION_PASS, SAcl.PERMISSION_NONE, SAcl.PERMISSION_READ, - * SAcl.PERMISSION_WRITE, SAcl.PERMISSION_READ_ACL, SAcl.PERMISSION_WRITE_ACL, SAcl.PERMISSION_FULL - * permission1 applies to objects, permission2 applies to buckets. - * a symbol to indicate whether the principal is anonymous (i.e. string "A") or authenticated user (i.e. - * string "*") - otherwise null indicates a single ACL for all users. - * - * Access controls that are specified via the "x-amz-acl:" headers in REST requests for buckets. - * The ACL request string is treated as a request for a known cannedAccessPolicy - * @param aclRequestString - The requested ACL from the set of AWS S3 canned ACLs - * @param target - Either "SBucket" or otherwise assumed to be for a single object item - * @param ownerID - An ID for the owner, if used in place of symbols "A" or "*" - */ - public static Triple getCannedAccessControls ( String aclRequestString, String target, String ownerID ) - throws UnsupportedException - { - if ( aclRequestString.equalsIgnoreCase( "public-read" )) - // Owner gets FULL_CONTROL and the anonymous principal (the 'A' symbol here) is granted READ access. 
- return new Triple (PERMISSION_FULL, PERMISSION_READ,"A"); - else if (aclRequestString.equalsIgnoreCase( "public-read-write" )) - // Owner gets FULL_CONTROL and the anonymous principal (the 'A' symbol here) is granted READ and WRITE access - return new Triple (PERMISSION_FULL, (PERMISSION_READ | PERMISSION_WRITE),"A"); - else if (aclRequestString.equalsIgnoreCase( "authenticated-read" )) - // Owner gets FULL_CONTROL and ANY principal authenticated as a registered S3 user (the '*' symbol here) is granted READ access - return new Triple (PERMISSION_FULL, PERMISSION_READ,"*"); - else if (aclRequestString.equalsIgnoreCase( "private" )) - // This is termed the "private" or default ACL, "Owner gets FULL_CONTROL" - return new Triple (PERMISSION_FULL, PERMISSION_FULL,null); - else if (aclRequestString.equalsIgnoreCase( "bucket-owner-read" )) - { - // Object Owner gets FULL_CONTROL, Bucket Owner gets READ - // This is equivalent to private when used with PUT Bucket - if ( target.equalsIgnoreCase( "SBucket" )) - return new Triple (PERMISSION_FULL,PERMISSION_FULL ,null); - else - return new Triple (PERMISSION_FULL,PERMISSION_READ,ownerID); - } - else if (aclRequestString.equalsIgnoreCase( "bucket-owner-full-control" )) - { - // Object Owner gets FULL_CONTROL, Bucket Owner gets FULL_CONTROL - // This is equivalent to private when used with PUT Bucket - if ( target.equalsIgnoreCase( "SBucket" )) - return new Triple (PERMISSION_FULL, PERMISSION_FULL, null); - else - return new Triple (PERMISSION_FULL,PERMISSION_FULL, ownerID); - } - else throw new UnsupportedException( "Unknown Canned Access Policy: " + aclRequestString + " is not supported" ); - } - ->>>>>>> 6472e7b... Now really adding the renamed files! -} +/* + * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.cloud.bridge.model; + +import java.io.Serializable; +import java.util.Date; + +import com.cloud.bridge.service.exception.UnsupportedException; +import com.cloud.bridge.util.OrderedPair; +import com.cloud.bridge.util.Triple; + +/** + * @author John Zucker, Kelven Yang + * A model of stored ACLs to remember the ACL permissions per canonicalUserID per grantee + * Hold the AWS S3 grantee and permission constants. + * + * This class implements two forms of getCannedAccessControls mappings, as static methods, + * + * (a) an OrderedPair which provides a maplet across + * < permission, grantee > + * when given an aclRequestString and a target (i.e. bucket or object), + * + * (b) a Triplet + * < permission1, permission2, symbol > + * when given an aclRequestString, a target (i.e. bucket or object) and the ID of the owner. 
+ */ +public class SAcl implements Serializable { + private static final long serialVersionUID = 7900837117165018850L; + + public static final int GRANTEE_USER = 0; + public static final int GRANTEE_ALLUSERS = 1; + public static final int GRANTEE_AUTHENTICATED = 2; + + public static final int PERMISSION_PASS = -1; // No ACL test required + public static final int PERMISSION_NONE = 0; + public static final int PERMISSION_READ = 1; + public static final int PERMISSION_WRITE = 2; + public static final int PERMISSION_READ_ACL = 4; + public static final int PERMISSION_WRITE_ACL = 8; + public static final int PERMISSION_FULL = (PERMISSION_READ | PERMISSION_WRITE | PERMISSION_READ_ACL | PERMISSION_WRITE_ACL); + + private Long id; + + private String target; + private long targetId; + + private int granteeType; + private String granteeCanonicalId; + + private int permission; + private int grantOrder; + + private Date createTime; + private Date lastModifiedTime; + + public SAcl() { + } + + public Long getId() { + return id; + } + + private void setId(Long id) { + this.id = id; + } + + public String getTarget() { + return target; + } + + public void setTarget(String target) { + this.target = target; + } + + public long getTargetId() { + return targetId; + } + + public void setTargetId(long targetId) { + this.targetId = targetId; + } + + public int getGranteeType() { + return granteeType; + } + + public void setGranteeType(int granteeType) { + this.granteeType = granteeType; + } + + public String getGranteeCanonicalId() { + return granteeCanonicalId; + } + + public void setGranteeCanonicalId(String granteeCanonicalId) { + this.granteeCanonicalId = granteeCanonicalId; + } + + public int getPermission() { + return permission; + } + + public void setPermission(int permission) { + this.permission = permission; + } + + public int getGrantOrder() { + return grantOrder; + } + + public void setGrantOrder(int grantOrder) { + this.grantOrder = grantOrder; + } + + public Date 
getCreateTime() { + return createTime; + } + + public void setCreateTime(Date createTime) { + this.createTime = createTime; + } + + public Date getLastModifiedTime() { + return lastModifiedTime; + } + + public void setLastModifiedTime(Date lastModifiedTime) { + this.lastModifiedTime = lastModifiedTime; + } + + /** Return an OrderedPair + * < permission, grantee > + * comprising + * a permission - which is one of SAcl.PERMISSION_PASS, SAcl.PERMISSION_NONE, SAcl.PERMISSION_READ, + * SAcl.PERMISSION_WRITE, SAcl.PERMISSION_READ_ACL, SAcl.PERMISSION_WRITE_ACL, SAcl.PERMISSION_FULL + * a grantee - which is one of GRANTEE_ALLUSERS, GRANTEE_AUTHENTICATED, GRANTEE_USER + * + * Access controls that are specified via the "x-amz-acl:" headers in REST requests for buckets. + * The ACL request string is treated as a request for a known cannedAccessPolicy + * @param aclRequestString - The requested ACL from the set of AWS S3 canned ACLs + * @param target - Either "SBucket" or otherwise assumed to be for a single object item + */ + public static OrderedPair getCannedAccessControls ( String aclRequestString, String target ) + throws UnsupportedException + { + if ( aclRequestString.equalsIgnoreCase( "public-read" )) + // All users granted READ access. 
+ return new OrderedPair (PERMISSION_READ,GRANTEE_ALLUSERS); + else if (aclRequestString.equalsIgnoreCase( "public-read-write" )) + // All users granted READ and WRITE access + return new OrderedPair ((PERMISSION_READ | PERMISSION_WRITE),GRANTEE_ALLUSERS); + else if (aclRequestString.equalsIgnoreCase( "authenticated-read" )) + // Authenticated users have READ access + return new OrderedPair (PERMISSION_READ,GRANTEE_AUTHENTICATED); + else if (aclRequestString.equalsIgnoreCase( "private" )) + // Only Owner gets FULL_CONTROL + return new OrderedPair (PERMISSION_FULL,GRANTEE_USER); + else if (aclRequestString.equalsIgnoreCase( "bucket-owner-read" )) + { + // Object Owner gets FULL_CONTROL, Bucket Owner gets READ + if ( target.equalsIgnoreCase( "SBucket" )) + return new OrderedPair (PERMISSION_READ, GRANTEE_USER); + else + return new OrderedPair (PERMISSION_FULL, GRANTEE_USER); + } + else if (aclRequestString.equalsIgnoreCase( "bucket-owner-full-control" )) + { + // Object Owner gets FULL_CONTROL, Bucket Owner gets FULL_CONTROL + // This is equivalent to private when used with PUT Bucket + return new OrderedPair (PERMISSION_FULL,GRANTEE_USER); + } + else throw new UnsupportedException( "Unknown Canned Access Policy: " + aclRequestString + " is not supported" ); + } + + /** Return a Triple + * < permission1, permission2, symbol > + * comprising + * two permissions - which is one of SAcl.PERMISSION_PASS, SAcl.PERMISSION_NONE, SAcl.PERMISSION_READ, + * SAcl.PERMISSION_WRITE, SAcl.PERMISSION_READ_ACL, SAcl.PERMISSION_WRITE_ACL, SAcl.PERMISSION_FULL + * permission1 applies to objects, permission2 applies to buckets. + * a symbol to indicate whether the principal is anonymous (i.e. string "A") or authenticated user (i.e. + * string "*") - otherwise null indicates a single ACL for all users. + * + * Access controls that are specified via the "x-amz-acl:" headers in REST requests for buckets. 
+ * The ACL request string is treated as a request for a known cannedAccessPolicy + * @param aclRequestString - The requested ACL from the set of AWS S3 canned ACLs + * @param target - Either "SBucket" or otherwise assumed to be for a single object item + * @param ownerID - An ID for the owner, if used in place of symbols "A" or "*" + */ + public static Triple getCannedAccessControls ( String aclRequestString, String target, String ownerID ) + throws UnsupportedException + { + if ( aclRequestString.equalsIgnoreCase( "public-read" )) + // Owner gets FULL_CONTROL and the anonymous principal (the 'A' symbol here) is granted READ access. + return new Triple (PERMISSION_FULL, PERMISSION_READ,"A"); + else if (aclRequestString.equalsIgnoreCase( "public-read-write" )) + // Owner gets FULL_CONTROL and the anonymous principal (the 'A' symbol here) is granted READ and WRITE access + return new Triple (PERMISSION_FULL, (PERMISSION_READ | PERMISSION_WRITE),"A"); + else if (aclRequestString.equalsIgnoreCase( "authenticated-read" )) + // Owner gets FULL_CONTROL and ANY principal authenticated as a registered S3 user (the '*' symbol here) is granted READ access + return new Triple (PERMISSION_FULL, PERMISSION_READ,"*"); + else if (aclRequestString.equalsIgnoreCase( "private" )) + // This is termed the "private" or default ACL, "Owner gets FULL_CONTROL" + return new Triple (PERMISSION_FULL, PERMISSION_FULL,null); + else if (aclRequestString.equalsIgnoreCase( "bucket-owner-read" )) + { + // Object Owner gets FULL_CONTROL, Bucket Owner gets READ + // This is equivalent to private when used with PUT Bucket + if ( target.equalsIgnoreCase( "SBucket" )) + return new Triple (PERMISSION_FULL,PERMISSION_FULL ,null); + else + return new Triple (PERMISSION_FULL,PERMISSION_READ,ownerID); + } + else if (aclRequestString.equalsIgnoreCase( "bucket-owner-full-control" )) + { + // Object Owner gets FULL_CONTROL, Bucket Owner gets FULL_CONTROL + // This is equivalent to private when used with PUT 
Bucket + if ( target.equalsIgnoreCase( "SBucket" )) + return new Triple (PERMISSION_FULL, PERMISSION_FULL, null); + else + return new Triple (PERMISSION_FULL,PERMISSION_FULL, ownerID); + } + else throw new UnsupportedException( "Unknown Canned Access Policy: " + aclRequestString + " is not supported" ); + } + +} \ No newline at end of file diff --git a/awsapi/src/com/cloud/bridge/model/SBucket.java b/awsapi/src/com/cloud/bridge/model/SBucket.java index 12e5d73e21f..4e045f79d0a 100644 --- a/awsapi/src/com/cloud/bridge/model/SBucket.java +++ b/awsapi/src/com/cloud/bridge/model/SBucket.java @@ -21,9 +21,6 @@ import java.util.HashSet; import java.util.Set; /** -<<<<<<< HEAD - * @author Kelven Yang -======= * @author Kelven Yang, John Zucker * Holds the relation * Id, @@ -33,16 +30,11 @@ import java.util.Set; * CreateTime, * VersioningStatus * For ORM see "com/cloud/bridge/model/SHost.hbm.xml" ->>>>>>> 6472e7b... Now really adding the renamed files! */ public class SBucket implements Serializable { private static final long serialVersionUID = 7430267766019671273L; -<<<<<<< HEAD - public static final int VERSIONING_NULL = 0; // -> initial set, not set to anything yet -======= public static final int VERSIONING_NULL = 0; ->>>>>>> 6472e7b... Now really adding the renamed files! public static final int VERSIONING_ENABLED = 1; public static final int VERSIONING_SUSPENDED = 2; diff --git a/awsapi/src/com/cloud/bridge/model/SObject.java b/awsapi/src/com/cloud/bridge/model/SObject.java index 7ded7582f46..e86bae48477 100644 --- a/awsapi/src/com/cloud/bridge/model/SObject.java +++ b/awsapi/src/com/cloud/bridge/model/SObject.java @@ -22,11 +22,7 @@ import java.util.Iterator; import java.util.Set; /** -<<<<<<< HEAD - * @author Kelven Yang -======= * @author Kelven Yang, John Zucker ->>>>>>> 6472e7b... Now really adding the renamed files! 
*/ public class SObject implements Serializable { private static final long serialVersionUID = 8566744941395660486L; @@ -37,11 +33,7 @@ public class SObject implements Serializable { private String ownerCanonicalId; private int nextSequence; -<<<<<<< HEAD - private String deletionMark; // -> this must also a unique ID to give to the REST client -======= private String deletionMark; // This must also a unique ID to give to the REST client ->>>>>>> 6472e7b... Now really adding the renamed files! private Date createTime; @@ -141,11 +133,7 @@ public class SObject implements Serializable { { SObjectItem item = it.next(); -<<<<<<< HEAD - // -> If versioning is off then return the item with the null version string (if exists) -======= // If versioning is off then return the item with the null version string (if exists) ->>>>>>> 6472e7b... Now really adding the renamed files! // For example, the bucket could have allowed versioning and then it was suspended // If an application wants a specific version it will need to explicitly ask for it try { diff --git a/awsapi/src/com/cloud/bridge/model/SObjectItem.java b/awsapi/src/com/cloud/bridge/model/SObjectItem.java index 1f5a82011ca..b2380443111 100644 --- a/awsapi/src/com/cloud/bridge/model/SObjectItem.java +++ b/awsapi/src/com/cloud/bridge/model/SObjectItem.java @@ -19,11 +19,7 @@ import java.io.Serializable; import java.util.Date; /** -<<<<<<< HEAD - * @author Kelven Yang -======= * @author Kelven Yang, John Zucker ->>>>>>> 6472e7b... Now really adding the renamed files! */ public class SObjectItem implements Serializable { private static final long serialVersionUID = -7351173256185687851L; @@ -80,11 +76,7 @@ public class SObjectItem implements Serializable { } public void setStoredPath(String storedPath) { -<<<<<<< HEAD - this.storedPath = storedPath; -======= this.storedPath = storedPath; // TODO - storedpath holds integer, called from S3Engine.allocObjectItem ->>>>>>> 6472e7b... Now really adding the renamed files! 
} public long getStoredSize() { diff --git a/awsapi/src/com/cloud/bridge/model/UserCredentials.java b/awsapi/src/com/cloud/bridge/model/UserCredentials.java index 1f3ad121221..9589567f8f5 100644 --- a/awsapi/src/com/cloud/bridge/model/UserCredentials.java +++ b/awsapi/src/com/cloud/bridge/model/UserCredentials.java @@ -66,11 +66,7 @@ public class UserCredentials implements Serializable { if (!(other instanceof UserCredentials)) return false; -<<<<<<< HEAD - // -> the cert id can be null in both or either, since it is only used for the SOAP API -======= // The cert id can be null. The cert is unused in the REST API. ->>>>>>> 6472e7b... Now really adding the renamed files! if ( getAccessKey().equals(((UserCredentials)other).getAccessKey()) && getSecretKey().equals(((UserCredentials)other).getSecretKey())) { @@ -89,11 +85,7 @@ public class UserCredentials implements Serializable { int hashCode = 0; String thisCertId = getCertUniqueId(); -<<<<<<< HEAD - // -> the cert id can be null, since it is only used for the SOAP API -======= // The cert id can be null. The cert is unused in the REST API. ->>>>>>> 6472e7b... Now really adding the renamed files! 
hashCode = hashCode*17 + getAccessKey().hashCode(); hashCode = hashCode*17 + getSecretKey().hashCode(); if (null != thisCertId) hashCode = hashCode*17 + thisCertId.hashCode(); diff --git a/awsapi/src/com/cloud/bridge/persist/EntityDao.java b/awsapi/src/com/cloud/bridge/persist/EntityDao.java index ac542b5fad7..b8a097f2c12 100644 --- a/awsapi/src/com/cloud/bridge/persist/EntityDao.java +++ b/awsapi/src/com/cloud/bridge/persist/EntityDao.java @@ -24,24 +24,6 @@ import org.hibernate.Session; import com.cloud.bridge.util.QueryHelper; /** -<<<<<<< HEAD - * @author Kelven Yang - */ -public class EntityDao { - private Class clazz; - - private boolean isCloudStackSession = false; - - public EntityDao(Class clazz){ - this(clazz, false); - } - - public EntityDao(Class clazz, boolean isCloudStackSession) { - this.clazz = clazz; - this.isCloudStackSession = isCloudStackSession; - // Note : beginTransaction can be called multiple times - PersistContext.beginTransaction(isCloudStackSession); -======= * @author Kelven Yang, John Zucker * Provide methods for getting, saving, deleting or updating state per session or, in a given session, returnin a List in * response to queryEntities for a particular instantation of the EntityDao generic class, as defined here. @@ -52,6 +34,14 @@ public class EntityDao { public class EntityDao { private Class clazz; + private boolean isCloudStackSession = false; + public EntityDao(Class clazz, boolean isCloudStackSession) { + this.clazz = clazz; + this.isCloudStackSession = isCloudStackSession; + // Note : beginTransaction can be called multiple times + PersistContext.beginTransaction(isCloudStackSession); + } + public EntityDao(Class clazz) { this.clazz = clazz; @@ -59,54 +49,34 @@ public class EntityDao { // "If a new underlying transaction is required, begin the transaction. Otherwise continue the new work in the // context of the existing underlying transaction." from the Hibernate spec PersistContext.beginTransaction(); ->>>>>>> 6472e7b... 
Now really adding the renamed files! + } @SuppressWarnings("unchecked") public T get(Serializable id) { -<<<<<<< HEAD - Session session = PersistContext.getSession(isCloudStackSession); -======= Session session = PersistContext.getSession(); ->>>>>>> 6472e7b... Now really adding the renamed files! return (T)session.get(clazz, id); } public T save(T entity) { -<<<<<<< HEAD - Session session = PersistContext.getSession(isCloudStackSession); -======= Session session = PersistContext.getSession(); ->>>>>>> 6472e7b... Now really adding the renamed files! session.saveOrUpdate(entity); return entity; } public T update(T entity) { -<<<<<<< HEAD - Session session = PersistContext.getSession(isCloudStackSession); -======= Session session = PersistContext.getSession(); ->>>>>>> 6472e7b... Now really adding the renamed files! session.saveOrUpdate(entity); return entity; } public void delete(T entity) { -<<<<<<< HEAD - Session session = PersistContext.getSession(isCloudStackSession); -======= Session session = PersistContext.getSession(); ->>>>>>> 6472e7b... Now really adding the renamed files! session.delete(entity); } public T queryEntity(String hql, Object[] params) { -<<<<<<< HEAD - Session session = PersistContext.getSession(isCloudStackSession); -======= Session session = PersistContext.getSession(); ->>>>>>> 6472e7b... Now really adding the renamed files! Query query = session.createQuery(hql); query.setMaxResults(1); QueryHelper.bindParameters(query, params); @@ -114,11 +84,7 @@ public class EntityDao { } public List queryEntities(String hql, Object[] params) { -<<<<<<< HEAD - Session session = PersistContext.getSession(isCloudStackSession); -======= Session session = PersistContext.getSession(); ->>>>>>> 6472e7b... Now really adding the renamed files! 
Query query = session.createQuery(hql); QueryHelper.bindParameters(query, params); @@ -126,11 +92,7 @@ public class EntityDao { } public List queryEntities(String hql, int offset, int limit, Object[] params) { -<<<<<<< HEAD - Session session = PersistContext.getSession(isCloudStackSession); -======= Session session = PersistContext.getSession(); ->>>>>>> 6472e7b... Now really adding the renamed files! Query query = session.createQuery(hql); QueryHelper.bindParameters(query, params); query.setFirstResult(offset); @@ -139,11 +101,7 @@ public class EntityDao { } public int executeUpdate(String hql, Object[] params) { -<<<<<<< HEAD - Session session = PersistContext.getSession(isCloudStackSession); -======= Session session = PersistContext.getSession(); ->>>>>>> 6472e7b... Now really adding the renamed files! Query query = session.createQuery(hql); QueryHelper.bindParameters(query, params); diff --git a/awsapi/src/com/cloud/bridge/persist/PersistContext.java b/awsapi/src/com/cloud/bridge/persist/PersistContext.java index 0674ed388b0..a7e43005301 100644 --- a/awsapi/src/com/cloud/bridge/persist/PersistContext.java +++ b/awsapi/src/com/cloud/bridge/persist/PersistContext.java @@ -28,12 +28,8 @@ import org.hibernate.Session; import org.hibernate.Transaction; import com.cloud.bridge.util.CloudSessionFactory; -<<<<<<< HEAD import com.cloud.bridge.util.CloudStackSessionFactory; -import com.cloud.bridge.util.Tuple; -======= import com.cloud.bridge.util.OrderedPair; ->>>>>>> 6472e7b... Now really adding the renamed files! /** * @author Kelven Yang @@ -53,16 +49,11 @@ public class PersistContext { protected final static Logger logger = Logger.getLogger(PersistContext.class); private static final CloudSessionFactory sessionFactory; -<<<<<<< HEAD - -======= ->>>>>>> 6472e7b... Now really adding the renamed files! 
private static final ThreadLocal threadSession = new ThreadLocal(); private static final ThreadLocal threadTransaction = new ThreadLocal(); private static final ThreadLocal> threadStore = new ThreadLocal>(); -<<<<<<< HEAD private static final CloudStackSessionFactory cloudStackSessionFactory; private static final ThreadLocal threadCloudStackSession = new ThreadLocal(); private static final ThreadLocal threadCloudStackTransaction = new ThreadLocal(); @@ -71,18 +62,12 @@ public class PersistContext { try { sessionFactory = CloudSessionFactory.getInstance(); cloudStackSessionFactory = CloudStackSessionFactory.getInstance(); -======= - static { - try { - sessionFactory = CloudSessionFactory.getInstance(); ->>>>>>> 6472e7b... Now really adding the renamed files! } catch(HibernateException e) { logger.error("Exception " + e.getMessage(), e); throw new PersistException(e); } } -<<<<<<< HEAD public static Session getSession(boolean cloudStackSession) { Session s = null; try { @@ -150,48 +135,12 @@ public class PersistContext { }else{ threadTransaction.set(tx); } -======= - public static Session getSession() { - Session s = threadSession.get(); - try { - if(s == null) { - s = sessionFactory.openSession(); - threadSession.set(s); - } - } catch(HibernateException e) { - logger.error("Exception " + e.getMessage(), e); - throw new PersistException(e); - } - return s; - } - - public static void closeSession() { - try { - Session s = (Session) threadSession.get(); - threadSession.set(null); - - if (s != null && s.isOpen()) - s.close(); - } catch(HibernateException e) { - logger.error("Exception " + e.getMessage(), e); - throw new PersistException(e); - } - } - - public static void beginTransaction() { - Transaction tx = threadTransaction.get(); - try { - if (tx == null) { - tx = getSession().beginTransaction(); - threadTransaction.set(tx); ->>>>>>> 6472e7b... Now really adding the renamed files! 
} } catch(HibernateException e) { logger.error("Exception " + e.getMessage(), e); throw new PersistException(e); } } -<<<<<<< HEAD public static void beginTransaction() { beginTransaction(false); @@ -219,24 +168,10 @@ public class PersistContext { logger.error("Exception " + e.getMessage(), e); rollbackTransaction(cloudStackTxn); -======= - - public static void commitTransaction() { - Transaction tx = threadTransaction.get(); - try { - if ( tx != null && !tx.wasCommitted() && !tx.wasRolledBack() ) - tx.commit(); - threadTransaction.set(null); - } catch (HibernateException e) { - logger.error("Exception " + e.getMessage(), e); - - rollbackTransaction(); ->>>>>>> 6472e7b... Now really adding the renamed files! throw new PersistException(e); } } -<<<<<<< HEAD public static void commitTransaction() { commitTransaction(false); } @@ -252,12 +187,6 @@ public class PersistContext { threadTransaction.set(null); } try { -======= - public static void rollbackTransaction() { - Transaction tx = (Transaction) threadTransaction.get(); - try { - threadTransaction.set(null); ->>>>>>> 6472e7b... Now really adding the renamed files! if ( tx != null && !tx.wasCommitted() && !tx.wasRolledBack() ) { tx.rollback(); } @@ -265,7 +194,6 @@ public class PersistContext { logger.error("Exception " + e.getMessage(), e); throw new PersistException(e); } finally { -<<<<<<< HEAD closeSession(cloudStackTxn); } } @@ -274,12 +202,6 @@ public class PersistContext { rollbackTransaction(false); } -======= - closeSession(); - } - } - ->>>>>>> 6472e7b... Now really adding the renamed files! public static void flush() { commitTransaction(); beginTransaction(); @@ -294,24 +216,15 @@ public class PersistContext { * @return */ public static boolean acquireNamedLock(String name, int timeoutSeconds) { -<<<<<<< HEAD - Connection conn = getJDBCConnection(name, true); - if(conn == null) { -======= Connection jdbcConnection = getJDBCConnection(name, true); if(jdbcConnection == null) { ->>>>>>> 6472e7b... 
Now really adding the renamed files! logger.warn("Unable to acquire named lock connection for named lock: " + name); return false; } PreparedStatement pstmt = null; try { -<<<<<<< HEAD - pstmt = conn.prepareStatement("SELECT COALESCE(GET_LOCK(?, ?),0)"); -======= pstmt = jdbcConnection.prepareStatement("SELECT COALESCE(GET_LOCK(?, ?),0)"); ->>>>>>> 6472e7b... Now really adding the renamed files! pstmt.setString(1, name); pstmt.setInt(2, timeoutSeconds); @@ -343,24 +256,15 @@ public class PersistContext { } public static boolean releaseNamedLock(String name) { -<<<<<<< HEAD - Connection conn = getJDBCConnection(name, false); - if(conn == null) { -======= Connection jdbcConnection = getJDBCConnection(name, false); if(jdbcConnection == null) { ->>>>>>> 6472e7b... Now really adding the renamed files! logger.error("Unable to acquire DB connection for global lock system"); return false; } PreparedStatement pstmt = null; try { -<<<<<<< HEAD - pstmt = conn.prepareStatement("SELECT COALESCE(RELEASE_LOCK(?), 0)"); -======= pstmt = jdbcConnection.prepareStatement("SELECT COALESCE(RELEASE_LOCK(?), 0)"); ->>>>>>> 6472e7b... Now really adding the renamed files! pstmt.setString(1, name); ResultSet rs = pstmt.executeQuery(); if(rs != null && rs.first()) @@ -379,11 +283,7 @@ public class PersistContext { @SuppressWarnings("deprecation") private static Connection getJDBCConnection(String name, boolean allocNew) { String registryKey = "JDBC-Connection." + name; -<<<<<<< HEAD - Tuple info = (Tuple)getThreadStoreObject(registryKey); -======= OrderedPair info = (OrderedPair)getThreadStoreObject(registryKey); ->>>>>>> 6472e7b... Now really adding the renamed files! 
if(info == null && allocNew) { Session session = sessionFactory.openSession(); Connection connection = session.connection(); @@ -405,11 +305,7 @@ public class PersistContext { return null; } -<<<<<<< HEAD - registerThreadStoreObject(registryKey, new Tuple(session, connection)); -======= registerThreadStoreObject(registryKey, new OrderedPair(session, connection)); ->>>>>>> 6472e7b... Now really adding the renamed files! return connection; } @@ -421,11 +317,7 @@ public class PersistContext { private static void releaseJDBCConnection(String name) { String registryKey = "JDBC-Connection." + name; -<<<<<<< HEAD - Tuple info = (Tuple)unregisterThreadStoreObject(registryKey); -======= OrderedPair info = (OrderedPair)unregisterThreadStoreObject(registryKey); ->>>>>>> 6472e7b... Now really adding the renamed files! if(info != null) { try { info.getSecond().close(); diff --git a/awsapi/src/com/cloud/bridge/persist/dao/BucketPolicyDao.java b/awsapi/src/com/cloud/bridge/persist/dao/BucketPolicyDao.java index 2240412e3ab..acd441e3b73 100644 --- a/awsapi/src/com/cloud/bridge/persist/dao/BucketPolicyDao.java +++ b/awsapi/src/com/cloud/bridge/persist/dao/BucketPolicyDao.java @@ -33,7 +33,6 @@ import com.cloud.bridge.util.ConfigurationHelper; public class BucketPolicyDao { public static final Logger logger = Logger.getLogger(BucketPolicyDao.class); -<<<<<<< HEAD private Connection conn = null; private String dbName = null; private String dbUser = null; @@ -44,16 +43,6 @@ public class BucketPolicyDao { public BucketPolicyDao() { File propertiesFile = ConfigurationHelper.findConfigurationFile("db.properties"); -======= - private Connection jdbcConnection = null; - private String dbName = null; - private String dbUser = null; - private String dbPassword = null; - - public BucketPolicyDao() - { - File propertiesFile = ConfigurationHelper.findConfigurationFile("ec2-service.properties"); ->>>>>>> 6472e7b... Now really adding the renamed files! 
Properties EC2Prop = null; if (null != propertiesFile) { @@ -65,17 +54,11 @@ public class BucketPolicyDao { } catch (IOException e) { logger.warn("Unable to read properties file: " + propertiesFile.getAbsolutePath(), e); } -<<<<<<< HEAD dbHost = EC2Prop.getProperty( "db.cloud.host" ); dbName = EC2Prop.getProperty( "db.awsapi.name" ); dbUser = EC2Prop.getProperty( "db.cloud.username" ); dbPassword = EC2Prop.getProperty( "db.cloud.password" ); dbPort = EC2Prop.getProperty( "db.cloud.port" ); -======= - dbName = EC2Prop.getProperty( "dbName" ); - dbUser = EC2Prop.getProperty( "dbUser" ); - dbPassword = EC2Prop.getProperty( "dbPassword" ); ->>>>>>> 6472e7b... Now really adding the renamed files! } } @@ -86,11 +69,7 @@ public class BucketPolicyDao { openConnection(); try { -<<<<<<< HEAD statement = conn.prepareStatement ( "INSERT INTO bucket_policies (BucketName, OwnerCanonicalID, Policy) VALUES (?,?,?)" ); -======= - statement = jdbcConnection.prepareStatement ( "INSERT INTO bucket_policies (BucketName, OwnerCanonicalID, Policy) VALUES (?,?,?)" ); ->>>>>>> 6472e7b... Now really adding the renamed files! statement.setString( 1, bucketName ); statement.setString( 2, owner ); statement.setString( 3, policy ); @@ -114,11 +93,7 @@ public class BucketPolicyDao { openConnection(); try { -<<<<<<< HEAD statement = conn.prepareStatement ( "SELECT OwnerCanonicalID FROM bucket_policies WHERE BucketName=?" ); -======= - statement = jdbcConnection.prepareStatement ( "SELECT OwnerCanonicalID FROM bucket_policies WHERE BucketName=?" ); ->>>>>>> 6472e7b... Now really adding the renamed files! statement.setString( 1, bucketName ); ResultSet rs = statement.executeQuery(); if (rs.next()) owner = rs.getString( "OwnerCanonicalID" ); @@ -138,11 +113,7 @@ public class BucketPolicyDao { openConnection(); try { -<<<<<<< HEAD statement = conn.prepareStatement ( "SELECT Policy FROM bucket_policies WHERE BucketName=?" 
); -======= - statement = jdbcConnection.prepareStatement ( "SELECT Policy FROM bucket_policies WHERE BucketName=?" ); ->>>>>>> 6472e7b... Now really adding the renamed files! statement.setString( 1, bucketName ); ResultSet rs = statement.executeQuery(); if (rs.next()) policy = rs.getString( "Policy" ); @@ -161,11 +132,7 @@ public class BucketPolicyDao { openConnection(); try { -<<<<<<< HEAD statement = conn.prepareStatement ( "DELETE FROM bucket_policies WHERE BucketName=?" ); -======= - statement = jdbcConnection.prepareStatement ( "DELETE FROM bucket_policies WHERE BucketName=?" ); ->>>>>>> 6472e7b... Now really adding the renamed files! statement.setString( 1, bucketName ); int count = statement.executeUpdate(); statement.close(); @@ -178,25 +145,14 @@ public class BucketPolicyDao { private void openConnection() throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException { -<<<<<<< HEAD if (null == conn) { Class.forName( "com.mysql.jdbc.Driver" ).newInstance(); conn = DriverManager.getConnection( "jdbc:mysql://" + dbHost + ":" + dbPort + "/" + dbName, dbUser, dbPassword ); -======= - if (null == jdbcConnection) { - Class.forName( "com.mysql.jdbc.Driver" ).newInstance(); - jdbcConnection = DriverManager.getConnection( "jdbc:mysql://localhost:3306/"+dbName, dbUser, dbPassword ); ->>>>>>> 6472e7b... Now really adding the renamed files! } } private void closeConnection() throws SQLException { -<<<<<<< HEAD if (null != conn) conn.close(); conn = null; -======= - if (null != jdbcConnection) jdbcConnection.close(); - jdbcConnection = null; ->>>>>>> 6472e7b... Now really adding the renamed files! 
} } diff --git a/awsapi/src/com/cloud/bridge/persist/dao/MultipartLoadDao.java b/awsapi/src/com/cloud/bridge/persist/dao/MultipartLoadDao.java index 6c6b1e0404f..191cdd4fb96 100644 --- a/awsapi/src/com/cloud/bridge/persist/dao/MultipartLoadDao.java +++ b/awsapi/src/com/cloud/bridge/persist/dao/MultipartLoadDao.java @@ -37,11 +37,7 @@ import com.cloud.bridge.service.core.s3.S3MetaDataEntry; import com.cloud.bridge.service.core.s3.S3MultipartPart; import com.cloud.bridge.service.core.s3.S3MultipartUpload; import com.cloud.bridge.util.ConfigurationHelper; -<<<<<<< HEAD -import com.cloud.bridge.util.Tuple; -======= import com.cloud.bridge.util.OrderedPair; ->>>>>>> 6472e7b... Now really adding the renamed files! public class MultipartLoadDao { public static final Logger logger = Logger.getLogger(MultipartLoadDao.class); @@ -50,21 +46,12 @@ public class MultipartLoadDao { private String dbName = null; private String dbUser = null; private String dbPassword = null; -<<<<<<< HEAD private String dbHost = null; private String dbPort = null; public MultipartLoadDao() { File propertiesFile = ConfigurationHelper.findConfigurationFile("db.properties"); Properties EC2Prop = null; -======= - - public MultipartLoadDao() { - File propertiesFile = ConfigurationHelper.findConfigurationFile("ec2-service.properties"); - Properties EC2Prop = null; - - // The settings for the CLOUDBRIDGE database are shared with the EC2 API ->>>>>>> 6472e7b... Now really adding the renamed files! 
if (null != propertiesFile) { EC2Prop = new Properties(); @@ -75,17 +62,11 @@ public class MultipartLoadDao { } catch (IOException e) { logger.warn("Unable to read properties file: " + propertiesFile.getAbsolutePath(), e); } -<<<<<<< HEAD dbHost = EC2Prop.getProperty( "db.cloud.host" ); dbName = EC2Prop.getProperty( "db.awsapi.name" ); dbUser = EC2Prop.getProperty( "db.cloud.username" ); dbPassword = EC2Prop.getProperty( "db.cloud.password" ); dbPort = EC2Prop.getProperty( "db.cloud.port" ); -======= - dbName = EC2Prop.getProperty( "dbName" ); - dbUser = EC2Prop.getProperty( "dbUser" ); - dbPassword = EC2Prop.getProperty( "dbPassword" ); ->>>>>>> 6472e7b... Now really adding the renamed files! } } @@ -97,11 +78,7 @@ public class MultipartLoadDao { * @return creator of the multipart upload, and NameKey of upload * @throws SQLException, ClassNotFoundException, IllegalAccessException, InstantiationException */ -<<<<<<< HEAD - public Tuple multipartExits( int uploadId ) -======= public OrderedPair multipartExits( int uploadId ) ->>>>>>> 6472e7b... Now really adding the renamed files! throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException { PreparedStatement statement = null; @@ -116,11 +93,7 @@ public class MultipartLoadDao { if ( rs.next()) { accessKey = rs.getString( "AccessKey" ); nameKey = rs.getString( "NameKey" ); -<<<<<<< HEAD - return new Tuple( accessKey, nameKey ); -======= return new OrderedPair( accessKey, nameKey ); ->>>>>>> 6472e7b... Now really adding the renamed files! 
} else return null; @@ -365,17 +338,10 @@ public class MultipartLoadDao { * @param prefix - can be null * @param keyMarker - can be null * @param uploadIdMarker - can be null, should only be defined if keyMarker is not-null -<<<<<<< HEAD - * @return Tuple - * @throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException - */ - public Tuple getInitiatedUploads( String bucketName, int maxParts, String prefix, String keyMarker, String uploadIdMarker ) -======= * @return OrderedPair * @throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException */ public OrderedPair getInitiatedUploads( String bucketName, int maxParts, String prefix, String keyMarker, String uploadIdMarker ) ->>>>>>> 6472e7b... Now really adding the renamed files! throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException { S3MultipartUpload[] inProgress = new S3MultipartUpload[maxParts]; @@ -421,11 +387,7 @@ public class MultipartLoadDao { statement.close(); if (i < maxParts) inProgress = (S3MultipartUpload[])resizeArray(inProgress,i); -<<<<<<< HEAD - return new Tuple(inProgress, isTruncated); -======= return new OrderedPair(inProgress, isTruncated); ->>>>>>> 6472e7b... Now really adding the renamed files! } finally { closeConnection(); @@ -470,11 +432,7 @@ public class MultipartLoadDao { parts[i] = new S3MultipartPart(); parts[i].setPartNumber( rs.getInt( "partNumber" )); -<<<<<<< HEAD - parts[i].setEtag( rs.getString( "MD5" )); -======= parts[i].setEtag( rs.getString( "MD5" ).toLowerCase()); ->>>>>>> 6472e7b... Now really adding the renamed files! 
parts[i].setLastModified( tod ); parts[i].setSize( rs.getInt( "StoredSize" )); parts[i].setPath( rs.getString( "StoredPath" )); @@ -556,11 +514,7 @@ public class MultipartLoadDao { throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException { if (null == conn) { Class.forName( "com.mysql.jdbc.Driver" ).newInstance(); -<<<<<<< HEAD conn = DriverManager.getConnection( "jdbc:mysql://" + dbHost + ":" + dbPort + "/" + dbName, dbUser, dbPassword ); -======= - conn = DriverManager.getConnection( "jdbc:mysql://localhost:3306/"+dbName, dbUser, dbPassword ); ->>>>>>> 6472e7b... Now really adding the renamed files! } } diff --git a/awsapi/src/com/cloud/bridge/persist/dao/OfferingDao.java b/awsapi/src/com/cloud/bridge/persist/dao/OfferingDao.java index 73b508b4479..c8433cfd376 100644 --- a/awsapi/src/com/cloud/bridge/persist/dao/OfferingDao.java +++ b/awsapi/src/com/cloud/bridge/persist/dao/OfferingDao.java @@ -30,7 +30,6 @@ import org.apache.log4j.Logger; import com.cloud.bridge.util.ConfigurationHelper; -<<<<<<< HEAD public class OfferingDao extends BaseDao { public static final Logger logger = Logger.getLogger(OfferingDao.class); @@ -39,36 +38,6 @@ public class OfferingDao extends BaseDao { public OfferingDao() { -======= -public class OfferingDao { - public static final Logger logger = Logger.getLogger(OfferingDao.class); - - private Connection conn = null; - private String dbName = null; - private String dbUser = null; - private String dbPassword = null; - - public OfferingDao() - { - File propertiesFile = ConfigurationHelper.findConfigurationFile("ec2-service.properties"); - Properties EC2Prop = null; - - // The settings for the CLOUDBRIDGE database are shared with the EC2 API - - if (null != propertiesFile) { - EC2Prop = new Properties(); - try { - EC2Prop.load( new FileInputStream( propertiesFile )); - } catch (FileNotFoundException e) { - logger.warn("Unable to open properties file: " + propertiesFile.getAbsolutePath(), e); - } catch 
(IOException e) { - logger.warn("Unable to read properties file: " + propertiesFile.getAbsolutePath(), e); - } - dbName = EC2Prop.getProperty( "dbName" ); - dbUser = EC2Prop.getProperty( "dbUser" ); - dbPassword = EC2Prop.getProperty( "dbPassword" ); - } ->>>>>>> 6472e7b... Now really adding the renamed files! } public int getOfferingCount() @@ -186,11 +155,7 @@ public class OfferingDao { { if (null == conn) { Class.forName( "com.mysql.jdbc.Driver" ).newInstance(); -<<<<<<< HEAD conn = DriverManager.getConnection( "jdbc:mysql://" + dbHost + ":" + dbPort + "/" + awsapi_dbName, dbUser, dbPassword ); -======= - conn = DriverManager.getConnection( "jdbc:mysql://localhost:3306/"+dbName, dbUser, dbPassword ); ->>>>>>> 6472e7b... Now really adding the renamed files! } } diff --git a/awsapi/src/com/cloud/bridge/persist/dao/SMetaDao.java b/awsapi/src/com/cloud/bridge/persist/dao/SMetaDao.java index 50adf07867a..4a62a57902c 100644 --- a/awsapi/src/com/cloud/bridge/persist/dao/SMetaDao.java +++ b/awsapi/src/com/cloud/bridge/persist/dao/SMetaDao.java @@ -23,11 +23,7 @@ import com.cloud.bridge.persist.PersistContext; import com.cloud.bridge.service.core.s3.S3MetaDataEntry; /** -<<<<<<< HEAD - * @author Kelven Yang -======= * @author Kelven Yang, John Zucker ->>>>>>> 6472e7b... Now really adding the renamed files! */ public class SMetaDao extends EntityDao { public SMetaDao() { @@ -50,11 +46,7 @@ public class SMetaDao extends EntityDao { } public void save(String target, long targetId, S3MetaDataEntry[] entries) { -<<<<<<< HEAD - // -> the target's meta data are being redefined -======= // To redefine the target's metadaa ->>>>>>> 6472e7b... Now really adding the renamed files! executeUpdate("delete from SMeta where target=? 
and targetId=?", new Object[] { target, new Long(targetId)}); if(entries != null) { diff --git a/awsapi/src/com/cloud/bridge/persist/dao/UserCredentialsDao.java b/awsapi/src/com/cloud/bridge/persist/dao/UserCredentialsDao.java index f20d56200ee..7795c55ce3d 100644 --- a/awsapi/src/com/cloud/bridge/persist/dao/UserCredentialsDao.java +++ b/awsapi/src/com/cloud/bridge/persist/dao/UserCredentialsDao.java @@ -15,32 +15,17 @@ */ package com.cloud.bridge.persist.dao; -<<<<<<< HEAD -import java.sql.*; - -======= import java.io.File; import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.IOException; import java.sql.*; import java.util.Properties; ->>>>>>> 6472e7b... Now really adding the renamed files! import org.apache.log4j.Logger; import com.cloud.bridge.model.UserCredentials; import com.cloud.bridge.service.exception.NoSuchObjectException; -<<<<<<< HEAD - - -public class UserCredentialsDao extends BaseDao { - public static final Logger logger = Logger.getLogger(UserCredentialsDao.class); - - private Connection conn = null; - - public UserCredentialsDao() { -======= import com.cloud.bridge.util.ConfigurationHelper; @@ -73,7 +58,6 @@ public class UserCredentialsDao { dbUser = EC2Prop.getProperty( "dbUser" ); dbPassword = EC2Prop.getProperty( "dbPassword" ); } ->>>>>>> 6472e7b... Now really adding the renamed files! } public void setUserKeys( String cloudAccessKey, String cloudSecretKey ) @@ -175,11 +159,7 @@ public class UserCredentialsDao { throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException { if (null == conn) { Class.forName( "com.mysql.jdbc.Driver" ).newInstance(); -<<<<<<< HEAD - conn = DriverManager.getConnection( "jdbc:mysql://" + dbHost + "/" + awsapi_dbName, dbUser, dbPassword ); -======= conn = DriverManager.getConnection( "jdbc:mysql://" + dbHost + "/" + dbName, dbUser, dbPassword ); ->>>>>>> 6472e7b... Now really adding the renamed files! 
} } diff --git a/awsapi/src/com/cloud/bridge/service/EC2MainServlet.java b/awsapi/src/com/cloud/bridge/service/EC2MainServlet.java index 3b2fea71169..e647970cd7b 100644 --- a/awsapi/src/com/cloud/bridge/service/EC2MainServlet.java +++ b/awsapi/src/com/cloud/bridge/service/EC2MainServlet.java @@ -9,11 +9,8 @@ import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; -<<<<<<< HEAD import com.cloud.bridge.persist.PersistContext; import com.cloud.bridge.persist.dao.CloudStackConfigurationDao; -======= ->>>>>>> 6472e7b... Now really adding the renamed files! import com.cloud.bridge.persist.dao.UserCredentialsDao; import com.cloud.bridge.util.ConfigurationHelper; @@ -23,18 +20,14 @@ public class EC2MainServlet extends HttpServlet{ public static final String EC2_REST_SERVLET_PATH="/rest/AmazonEC2/"; public static final String EC2_SOAP_SERVLET_PATH="/services/AmazonEC2/"; -<<<<<<< HEAD public static final String ENABLE_EC2_API="enable.ec2.api"; private static boolean isEC2APIEnabled = false; -======= ->>>>>>> 6472e7b... Now really adding the renamed files! /** * We build the path to where the keystore holding the WS-Security X509 certificates * are stored. */ public void init( ServletConfig config ) throws ServletException { -<<<<<<< HEAD try{ ConfigurationHelper.preConfigureConfigPathFromServletContext(config.getServletContext()); UserCredentialsDao.preCheckTableExistence(); @@ -49,10 +42,6 @@ public class EC2MainServlet extends HttpServlet{ PersistContext.closeSession(true); } -======= - ConfigurationHelper.preConfigureConfigPathFromServletContext(config.getServletContext()); - UserCredentialsDao.preCheckTableExistence(); ->>>>>>> 6472e7b... Now really adding the renamed files! 
} protected void doGet(HttpServletRequest req, HttpServletResponse resp) { @@ -64,7 +53,6 @@ public class EC2MainServlet extends HttpServlet{ } protected void doGetOrPost(HttpServletRequest request, HttpServletResponse response) { -<<<<<<< HEAD String action = request.getParameter( "Action" ); if(!isEC2APIEnabled){ @@ -72,10 +60,6 @@ public class EC2MainServlet extends HttpServlet{ } if(action != null){ -======= - String action = request.getParameter( "Action" ); - if(action!=null){ ->>>>>>> 6472e7b... Now really adding the renamed files! //We presume it's a Query/Rest call try { RequestDispatcher dispatcher = request.getRequestDispatcher(EC2_REST_SERVLET_PATH); @@ -97,4 +81,4 @@ public class EC2MainServlet extends HttpServlet{ } } -} +} \ No newline at end of file diff --git a/awsapi/src/com/cloud/bridge/service/EC2RestServlet.java b/awsapi/src/com/cloud/bridge/service/EC2RestServlet.java index 5abdf603dcc..2033fd55266 100644 --- a/awsapi/src/com/cloud/bridge/service/EC2RestServlet.java +++ b/awsapi/src/com/cloud/bridge/service/EC2RestServlet.java @@ -1,1861 +1,1827 @@ -/* - * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.cloud.bridge.service; - -import java.io.ByteArrayInputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileNotFoundException; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.OutputStream; -import java.io.OutputStreamWriter; -import java.security.KeyStore; -import java.security.SignatureException; -import java.security.cert.Certificate; -import java.security.cert.CertificateFactory; -import java.sql.SQLException; -import java.text.ParseException; -import java.util.ArrayList; -import java.util.Calendar; -import java.util.Date; -import java.util.Enumeration; -import java.util.List; -import java.util.Properties; -import java.util.UUID; - -import javax.servlet.ServletConfig; -import javax.servlet.ServletException; -import javax.servlet.http.HttpServlet; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; -import javax.xml.stream.XMLOutputFactory; -import javax.xml.stream.XMLStreamException; -import javax.xml.stream.XMLStreamWriter; - -import org.apache.axiom.om.OMAbstractFactory; -import org.apache.axiom.om.OMFactory; -import org.apache.axis2.AxisFault; -import org.apache.axis2.databinding.ADBBean; -import org.apache.axis2.databinding.ADBException; -import org.apache.axis2.databinding.utils.writer.MTOMAwareXMLSerializer; -import org.apache.commons.codec.binary.Base64; -import org.apache.log4j.Logger; - -import com.amazon.ec2.AllocateAddressResponse; -import com.amazon.ec2.AssociateAddressResponse; -import com.amazon.ec2.AttachVolumeResponse; -import com.amazon.ec2.AuthorizeSecurityGroupIngressResponse; -import com.amazon.ec2.CreateImageResponse; -import com.amazon.ec2.CreateKeyPairResponse; -import com.amazon.ec2.CreateSecurityGroupResponse; -import com.amazon.ec2.CreateSnapshotResponse; -import com.amazon.ec2.CreateVolumeResponse; -import com.amazon.ec2.DeleteKeyPairResponse; -import com.amazon.ec2.DeleteSecurityGroupResponse; -import 
com.amazon.ec2.DeleteSnapshotResponse; -import com.amazon.ec2.DeleteVolumeResponse; -import com.amazon.ec2.DeregisterImageResponse; -import com.amazon.ec2.DescribeAvailabilityZonesResponse; -import com.amazon.ec2.DescribeImageAttributeResponse; -import com.amazon.ec2.DescribeImagesResponse; -import com.amazon.ec2.DescribeInstanceAttributeResponse; -import com.amazon.ec2.DescribeInstancesResponse; -import com.amazon.ec2.DescribeKeyPairsResponse; -import com.amazon.ec2.DescribeSecurityGroupsResponse; -import com.amazon.ec2.DescribeSnapshotsResponse; -import com.amazon.ec2.DescribeVolumesResponse; -import com.amazon.ec2.DetachVolumeResponse; -import com.amazon.ec2.DisassociateAddressResponse; -import com.amazon.ec2.GetPasswordDataResponse; -import com.amazon.ec2.ImportKeyPairResponse; -import com.amazon.ec2.ModifyImageAttributeResponse; -import com.amazon.ec2.RebootInstancesResponse; -import com.amazon.ec2.RegisterImageResponse; -import com.amazon.ec2.ReleaseAddressResponse; -import com.amazon.ec2.ResetImageAttributeResponse; -import com.amazon.ec2.RevokeSecurityGroupIngressResponse; -import com.amazon.ec2.RunInstancesResponse; -import com.amazon.ec2.StartInstancesResponse; -import com.amazon.ec2.StopInstancesResponse; -import com.amazon.ec2.TerminateInstancesResponse; -import com.cloud.bridge.model.UserCredentials; -<<<<<<< HEAD -import com.cloud.bridge.persist.PersistContext; -import com.cloud.bridge.persist.dao.OfferingDao; -import com.cloud.bridge.persist.dao.UserCredentialsDao; -======= -import com.cloud.bridge.persist.dao.OfferingDao; -import com.cloud.bridge.persist.dao.UserCredentialsDao; -import com.cloud.bridge.service.controller.s3.ServiceProvider; ->>>>>>> 6472e7b... Now really adding the renamed files! 
-import com.cloud.bridge.service.core.ec2.EC2AssociateAddress; -import com.cloud.bridge.service.core.ec2.EC2AuthorizeRevokeSecurityGroup; -import com.cloud.bridge.service.core.ec2.EC2CreateImage; -import com.cloud.bridge.service.core.ec2.EC2CreateKeyPair; -import com.cloud.bridge.service.core.ec2.EC2CreateVolume; -import com.cloud.bridge.service.core.ec2.EC2DeleteKeyPair; -import com.cloud.bridge.service.core.ec2.EC2DescribeAddresses; -import com.cloud.bridge.service.core.ec2.EC2DescribeAvailabilityZones; -import com.cloud.bridge.service.core.ec2.EC2DescribeImages; -import com.cloud.bridge.service.core.ec2.EC2DescribeInstances; -import com.cloud.bridge.service.core.ec2.EC2DescribeKeyPairs; -import com.cloud.bridge.service.core.ec2.EC2DescribeSecurityGroups; -import com.cloud.bridge.service.core.ec2.EC2DescribeSnapshots; -import com.cloud.bridge.service.core.ec2.EC2DescribeVolumes; -import com.cloud.bridge.service.core.ec2.EC2DisassociateAddress; -import com.cloud.bridge.service.core.ec2.EC2Engine; -import com.cloud.bridge.service.core.ec2.EC2Filter; -import com.cloud.bridge.service.core.ec2.EC2GroupFilterSet; -import com.cloud.bridge.service.core.ec2.EC2Image; -import com.cloud.bridge.service.core.ec2.EC2ImportKeyPair; -import com.cloud.bridge.service.core.ec2.EC2InstanceFilterSet; -import com.cloud.bridge.service.core.ec2.EC2IpPermission; -import com.cloud.bridge.service.core.ec2.EC2KeyPairFilterSet; -import com.cloud.bridge.service.core.ec2.EC2RebootInstances; -import com.cloud.bridge.service.core.ec2.EC2RegisterImage; -import com.cloud.bridge.service.core.ec2.EC2ReleaseAddress; -import com.cloud.bridge.service.core.ec2.EC2RunInstances; -import com.cloud.bridge.service.core.ec2.EC2SecurityGroup; -import com.cloud.bridge.service.core.ec2.EC2SnapshotFilterSet; -import com.cloud.bridge.service.core.ec2.EC2StartInstances; -import com.cloud.bridge.service.core.ec2.EC2StopInstances; -import com.cloud.bridge.service.core.ec2.EC2Volume; -import 
com.cloud.bridge.service.core.ec2.EC2VolumeFilterSet; -import com.cloud.bridge.service.exception.EC2ServiceException; -import com.cloud.bridge.service.exception.NoSuchObjectException; -import com.cloud.bridge.service.exception.PermissionDeniedException; -import com.cloud.bridge.service.exception.EC2ServiceException.ClientError; -import com.cloud.bridge.util.AuthenticationUtils; -import com.cloud.bridge.util.ConfigurationHelper; -import com.cloud.bridge.util.EC2RestAuth; -<<<<<<< HEAD -import com.cloud.stack.models.CloudStackAccount; -======= ->>>>>>> 6472e7b... Now really adding the renamed files! - - -public class EC2RestServlet extends HttpServlet { - - private static final long serialVersionUID = -6168996266762804888L; - - public static final Logger logger = Logger.getLogger(EC2RestServlet.class); - - private OMFactory factory = OMAbstractFactory.getOMFactory(); - private XMLOutputFactory xmlOutFactory = XMLOutputFactory.newInstance(); - - private String pathToKeystore = null; - private String keystorePassword = null; - private String wsdlVersion = null; -<<<<<<< HEAD - private String version = null; -======= ->>>>>>> 6472e7b... Now really adding the renamed files! - - boolean debug=true; - - - /** - * We build the path to where the keystore holding the WS-Security X509 certificates - * are stored. 
- */ - @Override - public void init( ServletConfig config ) throws ServletException { - File propertiesFile = ConfigurationHelper.findConfigurationFile("ec2-service.properties"); - Properties EC2Prop = null; - - if (null != propertiesFile) { - logger.info("Use EC2 properties file: " + propertiesFile.getAbsolutePath()); - EC2Prop = new Properties(); - try { - EC2Prop.load( new FileInputStream( propertiesFile )); - } catch (FileNotFoundException e) { - logger.warn("Unable to open properties file: " + propertiesFile.getAbsolutePath(), e); - } catch (IOException e) { - logger.warn("Unable to read properties file: " + propertiesFile.getAbsolutePath(), e); - } - String keystore = EC2Prop.getProperty( "keystore" ); - keystorePassword = EC2Prop.getProperty( "keystorePass" ); - wsdlVersion = EC2Prop.getProperty( "WSDLVersion", "2009-11-30" ); -<<<<<<< HEAD - version = EC2Prop.getProperty( "cloudbridgeVersion", "UNKNOWN VERSION" ); -======= ->>>>>>> 6472e7b... Now really adding the renamed files! 
- - String installedPath = System.getenv("CATALINA_HOME"); - if (installedPath == null) installedPath = System.getenv("CATALINA_BASE"); - if (installedPath == null) installedPath = System.getProperty("catalina.home"); - String webappPath = config.getServletContext().getRealPath("/"); - //pathToKeystore = new String( installedPath + File.separator + "webapps" + File.separator + webappName + File.separator + "WEB-INF" + File.separator + "classes" + File.separator + keystore ); - pathToKeystore = new String( webappPath + "WEB-INF" + File.separator + "classes" + File.separator + keystore ); - } - } - - @Override - protected void doGet(HttpServletRequest req, HttpServletResponse resp) { - doGetOrPost(req, resp); - } - - @Override - protected void doPost(HttpServletRequest req, HttpServletResponse resp) { - doGetOrPost(req, resp); - } - - protected void doGetOrPost(HttpServletRequest request, HttpServletResponse response) { - - if(debug){ - System.out.println("EC2RestServlet.doGetOrPost: javax.servlet.forward.request_uri: "+request.getAttribute("javax.servlet.forward.request_uri")); - System.out.println("EC2RestServlet.doGetOrPost: javax.servlet.forward.context_path: "+request.getAttribute("javax.servlet.forward.context_path")); - System.out.println("EC2RestServlet.doGetOrPost: javax.servlet.forward.servlet_path: "+request.getAttribute("javax.servlet.forward.servlet_path")); - System.out.println("EC2RestServlet.doGetOrPost: javax.servlet.forward.path_info: "+request.getAttribute("javax.servlet.forward.path_info")); - System.out.println("EC2RestServlet.doGetOrPost: javax.servlet.forward.query_string: "+request.getAttribute("javax.servlet.forward.query_string")); - - } - - - String action = request.getParameter( "Action" ); - logRequest(request); - - // -> unauthenticated calls, should still be done over HTTPS - if (action.equalsIgnoreCase( "SetUserKeys" )) { - setUserKeys(request, response); - return; - } - - if (action.equalsIgnoreCase( "CloudEC2Version" )) { - 
cloudEC2Version(request, response); - return; - } - - // -> authenticated calls - try { - if (!authenticateRequest( request, response )) return; - - if (action.equalsIgnoreCase( "AllocateAddress" )) allocateAddress(request, response); - else if (action.equalsIgnoreCase( "AssociateAddress" )) associateAddress(request, response); - else if (action.equalsIgnoreCase( "AttachVolume" )) attachVolume(request, response ); - else if (action.equalsIgnoreCase( "AuthorizeSecurityGroupIngress" )) authorizeSecurityGroupIngress(request, response); - else if (action.equalsIgnoreCase( "CreateImage" )) createImage(request, response); - else if (action.equalsIgnoreCase( "CreateSecurityGroup" )) createSecurityGroup(request, response); - else if (action.equalsIgnoreCase( "CreateSnapshot" )) createSnapshot(request, response); - else if (action.equalsIgnoreCase( "CreateVolume" )) createVolume(request, response); - else if (action.equalsIgnoreCase( "DeleteSecurityGroup" )) deleteSecurityGroup(request, response); - else if (action.equalsIgnoreCase( "DeleteSnapshot" )) deleteSnapshot(request, response); - else if (action.equalsIgnoreCase( "DeleteVolume" )) deleteVolume(request, response); - else if (action.equalsIgnoreCase( "DeregisterImage" )) deregisterImage(request, response); - else if (action.equalsIgnoreCase( "DescribeAddresses" )) describeAddresses(request, response); - else if (action.equalsIgnoreCase( "DescribeAvailabilityZones" )) describeAvailabilityZones(request, response); - else if (action.equalsIgnoreCase( "DescribeImageAttribute" )) describeImageAttribute(request, response); - else if (action.equalsIgnoreCase( "DescribeImages" )) describeImages(request, response); - else if (action.equalsIgnoreCase( "DescribeInstanceAttribute" )) describeInstanceAttribute(request, response); - else if (action.equalsIgnoreCase( "DescribeInstances" )) describeInstances(request, response); - else if (action.equalsIgnoreCase( "DescribeSecurityGroups" )) describeSecurityGroups(request, response); 
- else if (action.equalsIgnoreCase( "DescribeSnapshots" )) describeSnapshots(request, response); - else if (action.equalsIgnoreCase( "DescribeVolumes" )) describeVolumes(request, response); - else if (action.equalsIgnoreCase( "DetachVolume" )) detachVolume(request, response); - else if (action.equalsIgnoreCase( "DisassociateAddress" )) disassociateAddress(request, response); - else if (action.equalsIgnoreCase( "ModifyImageAttribute" )) modifyImageAttribute(request, response); - else if (action.equalsIgnoreCase( "RebootInstances" )) rebootInstances(request, response); - else if (action.equalsIgnoreCase( "RegisterImage" )) registerImage(request, response); - else if (action.equalsIgnoreCase( "ReleaseAddress" )) releaseAddress(request, response); - else if (action.equalsIgnoreCase( "ResetImageAttribute" )) resetImageAttribute(request, response); - else if (action.equalsIgnoreCase( "RevokeSecurityGroupIngress")) revokeSecurityGroupIngress(request, response); - else if (action.equalsIgnoreCase( "RunInstances" )) runInstances(request, response); - else if (action.equalsIgnoreCase( "StartInstances" )) startInstances(request, response); - else if (action.equalsIgnoreCase( "StopInstances" )) stopInstances(request, response); - else if (action.equalsIgnoreCase( "TerminateInstances" )) terminateInstances(request, response); - else if (action.equalsIgnoreCase( "SetCertificate" )) setCertificate(request, response); - else if (action.equalsIgnoreCase( "DeleteCertificate" )) deleteCertificate(request, response); - else if (action.equalsIgnoreCase( "SetOfferMapping" )) setOfferMapping(request, response); - else if (action.equalsIgnoreCase( "DeleteOfferMapping" )) deleteOfferMapping(request, response); - else if (action.equalsIgnoreCase( "CreateKeyPair" )) createKeyPair(request, response); - else if (action.equalsIgnoreCase( "ImportKeyPair" )) importKeyPair(request, response); - else if (action.equalsIgnoreCase( "DeleteKeyPair" )) deleteKeyPair(request, response); - else if 
(action.equalsIgnoreCase( "DescribeKeyPairs" )) describeKeyPairs(request, response); - else if (action.equalsIgnoreCase( "GetPasswordData" )) getPasswordData(request, response); - else { - logger.error("Unsupported action " + action); - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } -<<<<<<< HEAD - PersistContext.commitTransaction(); - PersistContext.commitTransaction(true); - -======= - ->>>>>>> 6472e7b... Now really adding the renamed files! - } catch( EC2ServiceException e ) { - response.setStatus(e.getErrorCode()); - - if (e.getCause() != null && e.getCause() instanceof AxisFault) - faultResponse(response, ((AxisFault)e.getCause()).getFaultCode().getLocalPart(), e.getMessage()); - else { - logger.error("EC2ServiceException: " + e.getMessage(), e); - endResponse(response, e.toString()); - } - } catch( PermissionDeniedException e ) { - logger.error("Unexpected exception: " + e.getMessage(), e); - response.setStatus(403); - endResponse(response, "Access denied"); - - } catch( Exception e ) { - logger.error("Unexpected exception: " + e.getMessage(), e); - response.setStatus(500); - endResponse(response, e.toString()); - - } finally { - try { - response.flushBuffer(); - } catch (IOException e) { - logger.error("Unexpected exception " + e.getMessage(), e); - } -<<<<<<< HEAD - PersistContext.closeSession(); - PersistContext.closeSession(true); -======= ->>>>>>> 6472e7b... Now really adding the renamed files! - } - } - - /** - * Provide an easy way to determine the version of the implementation running. - * - * This is an unauthenticated REST call. - */ - private void cloudEC2Version( HttpServletRequest request, HttpServletResponse response ) { -<<<<<<< HEAD - String version_response = new String( "" + version + "" ); - response.setStatus(200); - endResponse(response, version_response); -======= - String version = new String( "1.03" ); - response.setStatus(200); - endResponse(response, version); ->>>>>>> 6472e7b... 
Now really adding the renamed files! - } - - /** - * This request registers the Cloud.com account holder to the EC2 service. The Cloud.com - * account holder saves his API access and secret keys with the EC2 service so that - * the EC2 service can make Cloud.com API calls on his behalf. The given API access - * and secret key are saved into the "usercredentials" database table. - * - * This is an unauthenticated REST call. The only required parameters are 'accesskey' and - * 'secretkey'. - * - * To verify that the given keys represent an existing account they are used to execute the - * Cloud.com's listAccounts API function. If the keys do not represent a valid account the - * listAccounts function will fail. - * - * A user can call this REST function any number of times, on each call the Cloud.com secret - * key is simply over writes any previously stored value. - * - * As with all REST calls HTTPS should be used to ensure their security. - */ - private void setUserKeys( HttpServletRequest request, HttpServletResponse response ) { - String[] accessKey = null; - String[] secretKey = null; - - try { - // -> all these parameters are required - accessKey = request.getParameterValues( "accesskey" ); - if ( null == accessKey || 0 == accessKey.length ) { - response.sendError(530, "Missing accesskey parameter" ); - return; - } - - secretKey = request.getParameterValues( "secretkey" ); - if ( null == secretKey || 0 == secretKey.length ) { - response.sendError(530, "Missing secretkey parameter" ); - return; - } - } catch( Exception e ) { - logger.error("SetUserKeys exception " + e.getMessage(), e); - response.setStatus(500); - endResponse(response, "SetUserKeys exception " + e.getMessage()); - return; - } - - // prime UserContext here -// logger.debug("initializing context"); - UserContext context = UserContext.current(); - - try { - // -> use the keys to see if the account actually exists - ServiceProvider.getInstance().getEC2Engine().validateAccount( accessKey[0], 
secretKey[0] ); - UserCredentialsDao credentialDao = new UserCredentialsDao(); - credentialDao.setUserKeys( accessKey[0], secretKey[0] ); - - } catch( Exception e ) { - logger.error("SetUserKeys " + e.getMessage(), e); - response.setStatus(401); - endResponse(response, e.toString()); - return; - } - response.setStatus(200); - endResponse(response, "User keys set successfully"); - } - - /** - * The SOAP API for EC2 uses WS-Security to sign all client requests. This requires that - * the client have a public/private key pair and the public key defined by a X509 certificate. - * Thus in order for a Cloud.com account holder to use the EC2's SOAP API he must register - * his X509 certificate with the EC2 service. This function allows the Cloud.com account - * holder to "load" his X509 certificate into the service. Note, that the SetUserKeys REST - * function must be called before this call. - * - * This is an authenticated REST call and as such must contain all the required REST parameters - * including: Signature, Timestamp, Expires, etc. The signature is calculated using the - * Cloud.com account holder's API access and secret keys and the Amazon defined EC2 signature - * algorithm. - * - * A user can call this REST function any number of times, on each call the X509 certificate - * simply over writes any previously stored value. 
- */ - private void setCertificate( HttpServletRequest request, HttpServletResponse response ) - throws Exception { - try { - // [A] Pull the cert and cloud AccessKey from the request - String[] certificate = request.getParameterValues( "cert" ); - if (null == certificate || 0 == certificate.length) { - response.sendError(530, "Missing cert parameter" ); - return; - } -// logger.debug( "SetCertificate cert: [" + certificate[0] + "]" ); - - String [] accessKey = request.getParameterValues( "AWSAccessKeyId" ); - if ( null == accessKey || 0 == accessKey.length ) { - response.sendError(530, "Missing AWSAccessKeyId parameter" ); - return; - } - - // [B] Open our keystore - FileInputStream fsIn = new FileInputStream( pathToKeystore ); - KeyStore certStore = KeyStore.getInstance( "JKS" ); - certStore.load( fsIn, keystorePassword.toCharArray()); - - // -> use the Cloud API key to save the cert in the keystore - // -> write the cert into the keystore on disk - Certificate userCert = null; - CertificateFactory cf = CertificateFactory.getInstance( "X.509" ); - - ByteArrayInputStream bs = new ByteArrayInputStream( certificate[0].getBytes()); - while (bs.available() > 0) userCert = cf.generateCertificate(bs); - certStore.setCertificateEntry( accessKey[0], userCert ); - - FileOutputStream fsOut = new FileOutputStream( pathToKeystore ); - certStore.store( fsOut, keystorePassword.toCharArray()); - - // [C] Associate the cert's uniqueId with the Cloud API keys - String uniqueId = AuthenticationUtils.X509CertUniqueId( userCert ); - logger.debug( "SetCertificate, uniqueId: " + uniqueId ); - UserCredentialsDao credentialDao = new UserCredentialsDao(); - credentialDao.setCertificateId( accessKey[0], uniqueId ); - response.setStatus(200); - endResponse(response, "User certificate set successfully"); - - } catch( NoSuchObjectException e ) { - logger.error("SetCertificate exception " + e.getMessage(), e); - response.sendError(404, "SetCertificate exception " + e.getMessage()); - - } 
catch( Exception e ) { - logger.error("SetCertificate exception " + e.getMessage(), e); - response.sendError(500, "SetCertificate exception " + e.getMessage()); - } - } - - /** - * The SOAP API for EC2 uses WS-Security to sign all client requests. This requires that - * the client have a public/private key pair and the public key defined by a X509 certificate. - * This REST call allows a Cloud.com account holder to remove a previouly "loaded" X509 - * certificate out of the EC2 service. - * - * This is an unauthenticated REST call and as such must contain all the required REST parameters - * including: Signature, Timestamp, Expires, etc. The signature is calculated using the - * Cloud.com account holder's API access and secret keys and the Amazon defined EC2 signature - * algorithm. - */ - private void deleteCertificate( HttpServletRequest request, HttpServletResponse response ) - throws Exception { - try { - String [] accessKey = request.getParameterValues( "AWSAccessKeyId" ); - if ( null == accessKey || 0 == accessKey.length ) { - response.sendError(530, "Missing AWSAccessKeyId parameter" ); - return; - } - - // -> delete the specified entry and save back to disk - FileInputStream fsIn = new FileInputStream( pathToKeystore ); - KeyStore certStore = KeyStore.getInstance( "JKS" ); - certStore.load( fsIn, keystorePassword.toCharArray()); - - if ( certStore.containsAlias( accessKey[0] )) { - certStore.deleteEntry( accessKey[0] ); - FileOutputStream fsOut = new FileOutputStream( pathToKeystore ); - certStore.store( fsOut, keystorePassword.toCharArray()); - - // -> dis-associate the cert's uniqueId with the Cloud API keys - UserCredentialsDao credentialDao = new UserCredentialsDao(); - credentialDao.setCertificateId( accessKey[0], null ); - response.setStatus(200); - endResponse(response, "User certificate deleted successfully"); - } - else response.setStatus(404); - - } catch( NoSuchObjectException e ) { - logger.error("SetCertificate exception " + e.getMessage(), e); 
- response.sendError(404, "SetCertificate exception " + e.getMessage()); - - } catch( Exception e ) { - logger.error("DeleteCertificate exception " + e.getMessage(), e); - response.sendError(500, "DeleteCertificate exception " + e.getMessage()); - } - } - - /** - * Allow the caller to define the mapping between the Amazon instance type strings - * (e.g., m1.small, cc1.4xlarge) and the cloudstack service offering ids. Setting - * an existing mapping just over writes the prevous values. - */ - private void setOfferMapping( HttpServletRequest request, HttpServletResponse response ) { - String amazonOffer = null; - String cloudOffer = null; - - try { - // -> all these parameters are required - amazonOffer = request.getParameter( "amazonoffer" ); - if ( null == amazonOffer ) { - response.sendError(530, "Missing amazonoffer parameter" ); - return; - } - - cloudOffer = request.getParameter( "cloudoffer" ); - if ( null == cloudOffer ) { - response.sendError(530, "Missing cloudoffer parameter" ); - return; - } - } catch( Exception e ) { - logger.error("SetOfferMapping exception " + e.getMessage(), e); - response.setStatus(500); - endResponse(response, "SetOfferMapping exception " + e.getMessage()); - return; - } -<<<<<<< HEAD - - // validate account is admin level - try { - CloudStackAccount currentAccount = ServiceProvider.getInstance().getEC2Engine().getCurrentAccount(); - - if (currentAccount.getAccountType() != 1) { - logger.debug("SetOfferMapping called by non-admin user!"); - response.setStatus(500); - endResponse(response, "Permission denied for non-admin user to setOfferMapping!"); - return; - } - } catch (Exception e) { - logger.error("SetOfferMapping " + e.getMessage(), e); - response.setStatus(401); - endResponse(response, e.toString()); - return; - } -======= ->>>>>>> 6472e7b... Now really adding the renamed files! 
- - try { - OfferingDao ofDao = new OfferingDao(); - ofDao.setOfferMapping( amazonOffer, cloudOffer ); - - } catch( Exception e ) { - logger.error("SetOfferMapping " + e.getMessage(), e); - response.setStatus(401); - endResponse(response, e.toString()); - return; - } - response.setStatus(200); - endResponse(response, "offering mapping set successfully"); - } - - private void deleteOfferMapping( HttpServletRequest request, HttpServletResponse response ) { - String amazonOffer = null; - - try { - // -> all these parameters are required - amazonOffer = request.getParameter( "amazonoffer" ); - if ( null == amazonOffer ) { - response.sendError(530, "Missing amazonoffer parameter" ); - return; - } - - } catch( Exception e ) { - logger.error("DeleteOfferMapping exception " + e.getMessage(), e); - response.setStatus(500); - endResponse(response, "DeleteOfferMapping exception " + e.getMessage()); - return; - } -<<<<<<< HEAD - - // validate account is admin level - try { - CloudStackAccount currentAccount = ServiceProvider.getInstance().getEC2Engine().getCurrentAccount(); - - if (currentAccount.getAccountType() != 1) { - logger.debug("deleteOfferMapping called by non-admin user!"); - response.setStatus(500); - endResponse(response, "Permission denied for non-admin user to deleteOfferMapping!"); - return; - } - } catch (Exception e) { - logger.error("deleteOfferMapping " + e.getMessage(), e); - response.setStatus(401); - endResponse(response, e.toString()); - return; - } -======= ->>>>>>> 6472e7b... Now really adding the renamed files! 
- - try { - OfferingDao ofDao = new OfferingDao(); - ofDao.deleteOfferMapping( amazonOffer ); - - } catch( Exception e ) { - logger.error("DeleteOfferMapping " + e.getMessage(), e); - response.setStatus(401); - endResponse(response, e.toString()); - return; - } - response.setStatus(200); - endResponse(response, "offering mapping deleted successfully"); - } - - /** - * The approach taken here is to map these REST calls into the same objects used - * to implement the matching SOAP requests (e.g., AttachVolume). This is done by parsing - * out the URL parameters and loading them into the relevant EC2XXX object(s). Once - * the parameters are loaded the appropriate EC2Engine function is called to perform - * the requested action. The result of the EC2Engine function is a standard - * Amazon WSDL defined object (e.g., AttachVolumeResponse Java object). Finally the - * serialize method is called on the returned response object to obtain the extected - * response XML. - */ - private void attachVolume( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2Volume EC2request = new EC2Volume(); - - // -> all these parameters are required - String[] volumeId = request.getParameterValues( "VolumeId" ); - if ( null != volumeId && 0 < volumeId.length ) - EC2request.setId( volumeId[0] ); - else { response.sendError(530, "Missing VolumeId parameter" ); return; } - - String[] instanceId = request.getParameterValues( "InstanceId" ); - if ( null != instanceId && 0 < instanceId.length ) - EC2request.setInstanceId( instanceId[0] ); - else { response.sendError(530, "Missing InstanceId parameter" ); return; } - - String[] device = request.getParameterValues( "Device" ); - if ( null != device && 0 < device.length ) - EC2request.setDevice( device[0] ); - else { response.sendError(530, "Missing Device parameter" ); return; } - - // -> execute the request - AttachVolumeResponse EC2response = 
EC2SoapServiceImpl.toAttachVolumeResponse( ServiceProvider.getInstance().getEC2Engine().attachVolume( EC2request )); - serializeResponse(response, EC2response); - } - - /** - * The SOAP equivalent of this function appears to allow multiple permissions per request, yet - * in the REST API documentation only one permission is allowed. - */ - private void revokeSecurityGroupIngress( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2AuthorizeRevokeSecurityGroup EC2request = new EC2AuthorizeRevokeSecurityGroup(); - - String[] groupName = request.getParameterValues( "GroupName" ); - if ( null != groupName && 0 < groupName.length ) - EC2request.setName( groupName[0] ); - else { response.sendError(530, "Missing GroupName parameter" ); return; } - - EC2IpPermission perm = new EC2IpPermission(); - - String[] protocol = request.getParameterValues( "IpProtocol" ); - if ( null != protocol && 0 < protocol.length ) - perm.setProtocol( protocol[0] ); - else { response.sendError(530, "Missing IpProtocol parameter" ); return; } - - String[] fromPort = request.getParameterValues( "FromPort" ); - if ( null != fromPort && 0 < fromPort.length ) - perm.setProtocol( fromPort[0] ); - else { response.sendError(530, "Missing FromPort parameter" ); return; } - - String[] toPort = request.getParameterValues( "ToPort" ); - if ( null != toPort && 0 < toPort.length ) - perm.setProtocol( toPort[0] ); - else { response.sendError(530, "Missing ToPort parameter" ); return; } - - String[] ranges = request.getParameterValues( "CidrIp" ); - if ( null != ranges && 0 < ranges.length) - perm.addIpRange( ranges[0] ); - else { response.sendError(530, "Missing CidrIp parameter" ); return; } - - String[] user = request.getParameterValues( "SourceSecurityGroupOwnerId" ); - if ( null == user || 0 == user.length) { - response.sendError(530, "Missing SourceSecurityGroupOwnerId parameter" ); - return; - } - - String[] name = 
request.getParameterValues( "SourceSecurityGroupName" ); - if ( null == name || 0 == name.length) { - response.sendError(530, "Missing SourceSecurityGroupName parameter" ); - return; - } - - EC2SecurityGroup group = new EC2SecurityGroup(); - group.setAccount( user[0] ); - group.setName( name[0] ); - perm.addUser( group ); - EC2request.addIpPermission( perm ); - - // -> execute the request - RevokeSecurityGroupIngressResponse EC2response = EC2SoapServiceImpl.toRevokeSecurityGroupIngressResponse( - ServiceProvider.getInstance().getEC2Engine().revokeSecurityGroup( EC2request )); - serializeResponse(response, EC2response); - } - - private void authorizeSecurityGroupIngress( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - // -> parse the complicated paramters into our standard object - EC2AuthorizeRevokeSecurityGroup EC2request = new EC2AuthorizeRevokeSecurityGroup(); - - String[] groupName = request.getParameterValues( "GroupName" ); - if ( null != groupName && 0 < groupName.length ) - EC2request.setName( groupName[0] ); - else { response.sendError(530, "Missing GroupName parameter" ); return; } - - // -> not clear how many parameters there are until we fail to get IpPermissions.n.IpProtocol - int nCount = 1; - do - { EC2IpPermission perm = new EC2IpPermission(); - - String[] protocol = request.getParameterValues( "IpPermissions." + nCount + ".IpProtocol" ); - if ( null != protocol && 0 < protocol.length ) - perm.setProtocol( protocol[0] ); - else break; - - String[] fromPort = request.getParameterValues( "IpPermissions." + nCount + ".FromPort" ); - if (null != fromPort && 0 < fromPort.length) perm.setProtocol( fromPort[0] ); - - String[] toPort = request.getParameterValues( "IpPermissions." 
+ nCount + ".ToPort" ); - if (null != toPort && 0 < toPort.length) perm.setProtocol( toPort[0] ); - - // -> list: IpPermissions.n.IpRanges.m.CidrIp - int mCount = 1; - do - { String[] ranges = request.getParameterValues( "IpPermissions." + nCount + ".IpRanges." + mCount + ".CidrIp" ); - if ( null != ranges && 0 < ranges.length) - perm.addIpRange( ranges[0] ); - else break; - mCount++; - - } while( true ); - - // -> list: IpPermissions.n.Groups.m.UserId and IpPermissions.n.Groups.m.GroupName - mCount = 1; - do - { String[] user = request.getParameterValues( "IpPermissions." + nCount + ".Groups." + mCount + ".UserId" ); - if ( null == user || 0 == user.length) break; - - String[] name = request.getParameterValues( "IpPermissions." + nCount + ".Groups." + mCount + ".GroupName" ); - if ( null == name || 0 == name.length) break; - - EC2SecurityGroup group = new EC2SecurityGroup(); - group.setAccount( user[0] ); - group.setName( name[0] ); - perm.addUser( group ); - mCount++; - - } while( true ); - - // -> multiple IP permissions can be specified per group name - EC2request.addIpPermission( perm ); - nCount++; - - } while( true ); - - if (1 == nCount) { response.sendError(530, "At least one IpPermissions required" ); return; } - - - // -> execute the request - AuthorizeSecurityGroupIngressResponse EC2response = EC2SoapServiceImpl.toAuthorizeSecurityGroupIngressResponse( - ServiceProvider.getInstance().getEC2Engine().authorizeSecurityGroup( EC2request )); - serializeResponse(response, EC2response); - } - - private void detachVolume( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2Volume EC2request = new EC2Volume(); - - String[] volumeId = request.getParameterValues( "VolumeId" ); - if ( null != volumeId && 0 < volumeId.length ) - EC2request.setId(volumeId[0]); - else { response.sendError(530, "Missing VolumeId parameter" ); return; } - - String[] instanceId = request.getParameterValues( "InstanceId" 
); - if ( null != instanceId && 0 < instanceId.length ) - EC2request.setInstanceId(instanceId[0]); - - String[] device = request.getParameterValues( "Device" ); - if ( null != device && 0 < device.length ) - EC2request.setDevice( device[0] ); - - // -> execute the request - DetachVolumeResponse EC2response = EC2SoapServiceImpl.toDetachVolumeResponse( ServiceProvider.getInstance().getEC2Engine().detachVolume( EC2request )); - serializeResponse(response, EC2response); - } - - private void deleteVolume( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2Volume EC2request = new EC2Volume(); - - String[] volumeId = request.getParameterValues( "VolumeId" ); - if ( null != volumeId && 0 < volumeId.length ) - EC2request.setId(volumeId[0]); - else { response.sendError(530, "Missing VolumeId parameter" ); return; } - - // -> execute the request - DeleteVolumeResponse EC2response = EC2SoapServiceImpl.toDeleteVolumeResponse( ServiceProvider.getInstance().getEC2Engine().deleteVolume( EC2request )); - serializeResponse(response, EC2response); - } - - private void createVolume( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2CreateVolume EC2request = new EC2CreateVolume(); - - String[] zoneName = request.getParameterValues( "AvailabilityZone" ); - if ( null != zoneName && 0 < zoneName.length ) - EC2request.setZoneName( zoneName[0] ); - else { response.sendError(530, "Missing AvailabilityZone parameter" ); return; } - - String[] size = request.getParameterValues( "Size" ); - String[] snapshotId = request.getParameterValues("SnapshotId"); - boolean useSnapshot = false; - boolean useSize = false; - - if (null != size && 0 < size.length) - useSize = true; - - if (snapshotId != null && snapshotId.length != 0) - useSnapshot = true; - - if (useSize && !useSnapshot) { - EC2request.setSize( size[0] ); - } else if (useSnapshot && !useSize) { - 
EC2request.setSnapshotId(snapshotId[0]); - } else if (useSize && useSnapshot) { - response.sendError(530, "Size and SnapshotId parameters are mutually exclusive" ); return; - } else { - response.sendError(530, "Size or SnapshotId has to be specified" ); return; - } - - - // -> execute the request - CreateVolumeResponse EC2response = EC2SoapServiceImpl.toCreateVolumeResponse( ServiceProvider.getInstance().getEC2Engine().createVolume( EC2request )); - serializeResponse(response, EC2response); - } - - private void createSecurityGroup( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - - String groupName, groupDescription = null; - - String[] name = request.getParameterValues( "GroupName" ); - if ( null != name && 0 < name.length ) - groupName = name[0]; - else { response.sendError(530, "Missing GroupName parameter" ); return; } - - String[] desc = request.getParameterValues( "GroupDescription" ); - if ( null != desc && 0 < desc.length ) - groupDescription = desc[0]; - else { response.sendError(530, "Missing GroupDescription parameter" ); return; } - - // -> execute the request - CreateSecurityGroupResponse EC2response = EC2SoapServiceImpl.toCreateSecurityGroupResponse( ServiceProvider.getInstance().getEC2Engine().createSecurityGroup( groupName, groupDescription )); - serializeResponse(response, EC2response); - } - - private void deleteSecurityGroup( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - String groupName = null; - - String[] name = request.getParameterValues( "GroupName" ); - if ( null != name && 0 < name.length ) - groupName = name[0]; - else { response.sendError(530, "Missing GroupName parameter" ); return; } - - // -> execute the request - DeleteSecurityGroupResponse EC2response = EC2SoapServiceImpl.toDeleteSecurityGroupResponse( ServiceProvider.getInstance().getEC2Engine().deleteSecurityGroup( groupName )); - 
serializeResponse(response, EC2response); - } - - private void deleteSnapshot( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - String snapshotId = null; - - String[] snapSet = request.getParameterValues( "SnapshotId" ); - if ( null != snapSet && 0 < snapSet.length ) - snapshotId = snapSet[0]; - else { response.sendError(530, "Missing SnapshotId parameter" ); return; } - - // -> execute the request - DeleteSnapshotResponse EC2response = EC2SoapServiceImpl.toDeleteSnapshotResponse( ServiceProvider.getInstance().getEC2Engine().deleteSnapshot( snapshotId )); - serializeResponse(response, EC2response); - } - - private void createSnapshot( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - String volumeId = null; - - String[] volSet = request.getParameterValues( "VolumeId" ); - if ( null != volSet && 0 < volSet.length ) - volumeId = volSet[0]; - else { response.sendError(530, "Missing VolumeId parameter" ); return; } - - // -> execute the request - EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); - CreateSnapshotResponse EC2response = EC2SoapServiceImpl.toCreateSnapshotResponse( engine.createSnapshot( volumeId ), engine); - serializeResponse(response, EC2response); - } - - private void deregisterImage( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2Image image = new EC2Image(); - - String[] imageId = request.getParameterValues( "ImageId" ); - if ( null != imageId && 0 < imageId.length ) - image.setId( imageId[0] ); - else { response.sendError(530, "Missing ImageId parameter" ); return; } - - // -> execute the request - DeregisterImageResponse EC2response = EC2SoapServiceImpl.toDeregisterImageResponse( ServiceProvider.getInstance().getEC2Engine().deregisterImage( image )); - serializeResponse(response, EC2response); - } - - private void createImage( 
HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2CreateImage EC2request = new EC2CreateImage(); - - String[] instanceId = request.getParameterValues( "InstanceId" ); - if ( null != instanceId && 0 < instanceId.length ) - EC2request.setInstanceId( instanceId[0] ); - else { response.sendError(530, "Missing InstanceId parameter" ); return; } - - String[] name = request.getParameterValues( "Name" ); - if ( null != name && 0 < name.length ) - EC2request.setName( name[0] ); - else { response.sendError(530, "Missing Name parameter" ); return; } - - String[] description = request.getParameterValues( "Description" ); - if ( null != description && 0 < description.length ) - EC2request.setDescription( description[0] ); - - // -> execute the request - CreateImageResponse EC2response = EC2SoapServiceImpl.toCreateImageResponse( ServiceProvider.getInstance().getEC2Engine().createImage( EC2request )); - serializeResponse(response, EC2response); - } - - private void registerImage( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2RegisterImage EC2request = new EC2RegisterImage(); - - String[] location = request.getParameterValues( "ImageLocation" ); - if ( null != location && 0 < location.length ) - EC2request.setLocation( location[0] ); - else { response.sendError(530, "Missing ImageLocation parameter" ); return; } - - String[] cloudRedfined = request.getParameterValues( "Architecture" ); - if ( null != cloudRedfined && 0 < cloudRedfined.length ) - EC2request.setArchitecture( cloudRedfined[0] ); - else { response.sendError(530, "Missing Architecture parameter" ); return; } - - String[] name = request.getParameterValues( "Name" ); - if ( null != name && 0 < name.length ) - EC2request.setName( name[0] ); - - String[] description = request.getParameterValues( "Description" ); - if ( null != description && 0 < description.length ) - 
EC2request.setDescription( description[0] ); - - // -> execute the request - RegisterImageResponse EC2response = EC2SoapServiceImpl.toRegisterImageResponse( ServiceProvider.getInstance().getEC2Engine().registerImage( EC2request )); - serializeResponse(response, EC2response); - } - - private void modifyImageAttribute( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2Image image = new EC2Image(); - - // -> its interesting to note that the SOAP API docs has description but the REST API docs do not - String[] imageId = request.getParameterValues( "ImageId" ); - if ( null != imageId && 0 < imageId.length ) - image.setId( imageId[0] ); - else { response.sendError(530, "Missing ImageId parameter" ); return; } - - String[] description = request.getParameterValues( "Description" ); - if ( null != description && 0 < description.length ) - image.setDescription( description[0] ); - else { response.sendError(530, "Missing Description parameter" ); return; } - - // -> execute the request - ModifyImageAttributeResponse EC2response = EC2SoapServiceImpl.toModifyImageAttributeResponse( ServiceProvider.getInstance().getEC2Engine().modifyImageAttribute( image )); - serializeResponse(response, EC2response); - } - - private void resetImageAttribute( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2Image image = new EC2Image(); - - String[] imageId = request.getParameterValues( "ImageId" ); - if ( null != imageId && 0 < imageId.length ) - image.setId( imageId[0] ); - else { response.sendError(530, "Missing ImageId parameter" ); return; } - - // -> execute the request - image.setDescription( "" ); - ResetImageAttributeResponse EC2response = EC2SoapServiceImpl.toResetImageAttributeResponse( ServiceProvider.getInstance().getEC2Engine().modifyImageAttribute( image )); - serializeResponse(response, EC2response); - } - - private void runInstances( 
HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2RunInstances EC2request = new EC2RunInstances(); - - // -> so in the Amazon docs for this REST call there is no userData even though there is in the SOAP docs - String[] imageId = request.getParameterValues( "ImageId" ); - if ( null != imageId && 0 < imageId.length ) - EC2request.setTemplateId( imageId[0] ); - else { response.sendError(530, "Missing ImageId parameter" ); return; } - - String[] minCount = request.getParameterValues( "MinCount" ); - if ( null != minCount && 0 < minCount.length ) - EC2request.setMinCount( Integer.parseInt( minCount[0] )); - else { response.sendError(530, "Missing MinCount parameter" ); return; } - - String[] maxCount = request.getParameterValues( "MaxCount" ); - if ( null != maxCount && 0 < maxCount.length ) - EC2request.setMaxCount( Integer.parseInt( maxCount[0] )); - else { response.sendError(530, "Missing MaxCount parameter" ); return; } - - String[] instanceType = request.getParameterValues( "InstanceType" ); - if ( null != instanceType && 0 < instanceType.length ) - EC2request.setInstanceType( instanceType[0] ); - - String[] zoneName = request.getParameterValues( "Placement.AvailabilityZone" ); - if ( null != zoneName && 0 < zoneName.length ) - EC2request.setZoneName( zoneName[0] ); - - String[] size = request.getParameterValues("size"); - if (size != null) { - EC2request.setSize(Integer.valueOf(size[0])); - } - - String[] keyName = request.getParameterValues("KeyName"); - if (keyName != null) { - EC2request.setKeyName(keyName[0]); - } - - // -> execute the request - EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); - RunInstancesResponse EC2response = EC2SoapServiceImpl.toRunInstancesResponse( engine.runInstances( EC2request ), engine); - serializeResponse(response, EC2response); - } - - private void rebootInstances( HttpServletRequest request, HttpServletResponse response ) - throws 
ADBException, XMLStreamException, IOException { - EC2RebootInstances EC2request = new EC2RebootInstances(); - int count = 0; - - // -> load in all the "InstanceId.n" parameters if any - Enumeration names = request.getParameterNames(); - while( names.hasMoreElements()) { - String key = (String)names.nextElement(); - if (key.startsWith("InstanceId")) { - String[] value = request.getParameterValues( key ); - if (null != value && 0 < value.length) { - EC2request.addInstanceId( value[0] ); - count++; - } - } - } - if (0 == count) { response.sendError(530, "Missing InstanceId parameter" ); return; } - - // -> execute the request - RebootInstancesResponse EC2response = EC2SoapServiceImpl.toRebootInstancesResponse( ServiceProvider.getInstance().getEC2Engine().rebootInstances(EC2request)); - serializeResponse(response, EC2response); - } - - private void startInstances( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2StartInstances EC2request = new EC2StartInstances(); - int count = 0; - - // -> load in all the "InstanceId.n" parameters if any - Enumeration names = request.getParameterNames(); - while( names.hasMoreElements()) { - String key = (String)names.nextElement(); - if (key.startsWith("InstanceId")) { - String[] value = request.getParameterValues( key ); - if (null != value && 0 < value.length) { - EC2request.addInstanceId( value[0] ); - count++; - } - } - } - if (0 == count) { response.sendError(530, "Missing InstanceId parameter" ); return; } - - // -> execute the request - StartInstancesResponse EC2response = EC2SoapServiceImpl.toStartInstancesResponse( ServiceProvider.getInstance().getEC2Engine().startInstances(EC2request)); - serializeResponse(response, EC2response); - } - - private void stopInstances( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2StopInstances EC2request = new EC2StopInstances(); - int count = 0; - 
- // -> load in all the "InstanceId.n" parameters if any - Enumeration names = request.getParameterNames(); - while( names.hasMoreElements()) { - String key = (String)names.nextElement(); - if (key.startsWith("InstanceId")) { - String[] value = request.getParameterValues( key ); - if (null != value && 0 < value.length) { - EC2request.addInstanceId( value[0] ); - count++; - } - } - } - if (0 == count) { response.sendError(530, "Missing InstanceId parameter" ); return; } - - // -> execute the request - StopInstancesResponse EC2response = EC2SoapServiceImpl.toStopInstancesResponse( ServiceProvider.getInstance().getEC2Engine().stopInstances( EC2request )); - serializeResponse(response, EC2response); - } - - private void terminateInstances( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2StopInstances EC2request = new EC2StopInstances(); - int count = 0; - - // -> load in all the "InstanceId.n" parameters if any - Enumeration names = request.getParameterNames(); - while( names.hasMoreElements()) { - String key = (String)names.nextElement(); - if (key.startsWith("InstanceId")) { - String[] value = request.getParameterValues( key ); - if (null != value && 0 < value.length) { - EC2request.addInstanceId( value[0] ); - count++; - } - } - } - if (0 == count) { response.sendError(530, "Missing InstanceId parameter" ); return; } - - // -> execute the request - EC2request.setDestroyInstances( true ); - TerminateInstancesResponse EC2response = EC2SoapServiceImpl.toTermInstancesResponse( ServiceProvider.getInstance().getEC2Engine().stopInstances( EC2request )); - serializeResponse(response, EC2response); - } - - /** - * We are reusing the SOAP code to process this request. We then use Axiom to serialize the - * resulting EC2 Amazon object into XML to return to the client. 
- */ - private void describeAvailabilityZones( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2DescribeAvailabilityZones EC2request = new EC2DescribeAvailabilityZones(); - - // -> load in all the "ZoneName.n" parameters if any - Enumeration names = request.getParameterNames(); - while( names.hasMoreElements()) { - String key = (String)names.nextElement(); - if (key.startsWith("ZoneName")) { - String[] value = request.getParameterValues( key ); - if (null != value && 0 < value.length) EC2request.addZone( value[0] ); - } - } - // -> execute the request - DescribeAvailabilityZonesResponse EC2response = EC2SoapServiceImpl.toDescribeAvailabilityZonesResponse( ServiceProvider.getInstance().getEC2Engine().handleRequest( EC2request )); - serializeResponse(response, EC2response); - } - - private void describeImages( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2DescribeImages EC2request = new EC2DescribeImages(); - - // -> load in all the "ImageId.n" parameters if any, and ignore all other parameters - Enumeration names = request.getParameterNames(); - while( names.hasMoreElements()) { - String key = (String)names.nextElement(); - if (key.startsWith("ImageId")) { - String[] value = request.getParameterValues( key ); - if (null != value && 0 < value.length) EC2request.addImageSet( value[0] ); - } - } - // -> execute the request - EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); - DescribeImagesResponse EC2response = EC2SoapServiceImpl.toDescribeImagesResponse( engine.describeImages( EC2request )); - serializeResponse(response, EC2response); - } - - private void describeImageAttribute( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2DescribeImages EC2request = new EC2DescribeImages(); - - // -> only works for queries about descriptions - 
String[] descriptions = request.getParameterValues( "Description" ); - if ( null != descriptions && 0 < descriptions.length ) { - String[] value = request.getParameterValues( "ImageId" ); - EC2request.addImageSet( value[0] ); - } - else { - response.sendError(501, "Unsupported - only description supported" ); - return; - } - - // -> execute the request - DescribeImageAttributeResponse EC2response = EC2SoapServiceImpl.toDescribeImageAttributeResponse( ServiceProvider.getInstance().getEC2Engine().describeImages( EC2request )); - serializeResponse(response, EC2response); - } - - - private void describeInstances( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException - { - EC2DescribeInstances EC2request = new EC2DescribeInstances(); - - // -> load in all the "InstanceId.n" parameters if any - Enumeration names = request.getParameterNames(); - while( names.hasMoreElements()) - { - String key = (String)names.nextElement(); - if (key.startsWith("InstanceId")) { - String[] value = request.getParameterValues( key ); - if (null != value && 0 < value.length) EC2request.addInstanceId( value[0] ); - } - } - - // -> are there any filters with this request? 
- EC2Filter[] filterSet = extractFilters( request ); - if (null != filterSet) - { - EC2InstanceFilterSet ifs = new EC2InstanceFilterSet(); - for( int i=0; i < filterSet.length; i++ ) ifs.addFilter( filterSet[i] ); - EC2request.setFilterSet( ifs ); - } - - // -> execute the request - EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); - DescribeInstancesResponse EC2response = EC2SoapServiceImpl.toDescribeInstancesResponse( engine.describeInstances( EC2request ), engine); - serializeResponse(response, EC2response); - } - - private void describeAddresses( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2DescribeAddresses ec2Request = new EC2DescribeAddresses(); - - // -> load in all the "PublicIp.n" parameters if any - Enumeration names = request.getParameterNames(); - while( names.hasMoreElements()) { - String key = (String)names.nextElement(); - if (key.startsWith("PublicIp")) { - String[] value = request.getParameterValues( key ); - if (null != value && 0 < value.length) ec2Request.addPublicIp( value[0] ); - } - } - // -> execute the request - EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); - serializeResponse(response, EC2SoapServiceImpl.toDescribeAddressesResponse( engine.describeAddresses( ec2Request))); - } - - private void allocateAddress( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - - EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); - - AllocateAddressResponse ec2Response = EC2SoapServiceImpl.toAllocateAddressResponse( engine.allocateAddress()); - - serializeResponse(response, ec2Response); - } - - private void releaseAddress( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - - EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); - - String publicIp = request.getParameter( "PublicIp" ); - if 
(publicIp == null) { - response.sendError(530, "Missing PublicIp parameter"); - return; - } - - EC2ReleaseAddress ec2Request = new EC2ReleaseAddress(); - if (ec2Request != null) { - ec2Request.setPublicIp(publicIp); - } - - ReleaseAddressResponse EC2Response = EC2SoapServiceImpl.toReleaseAddressResponse( engine.releaseAddress( ec2Request )); - - serializeResponse(response, EC2Response); - } - - private void associateAddress( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); - - String publicIp = request.getParameter( "PublicIp" ); - if (null == publicIp) { - response.sendError(530, "Missing PublicIp parameter" ); - return; - } - String instanceId = request.getParameter( "InstanceId" ); - if (null == instanceId) { - response.sendError(530, "Missing InstanceId parameter" ); - return; - } - - EC2AssociateAddress ec2Request = new EC2AssociateAddress(); - if (ec2Request != null) { - ec2Request.setInstanceId(instanceId); - ec2Request.setPublicIp(publicIp); - } - - AssociateAddressResponse ec2Response = EC2SoapServiceImpl.toAssociateAddressResponse( engine.associateAddress( ec2Request )); - - serializeResponse(response, ec2Response); - } - - private void disassociateAddress( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); - - String publicIp = request.getParameter( "PublicIp" ); - if (null == publicIp) { - response.sendError(530, "Missing PublicIp parameter" ); - return; - } - - EC2DisassociateAddress ec2Request = new EC2DisassociateAddress(); - if (ec2Request != null) { - ec2Request.setPublicIp(publicIp); - } - - DisassociateAddressResponse ec2Response = EC2SoapServiceImpl.toDisassociateAddressResponse( engine.disassociateAddress( ec2Request ) ); - - serializeResponse(response, ec2Response); - } - - - 
private void describeSecurityGroups( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException - { - EC2DescribeSecurityGroups EC2request = new EC2DescribeSecurityGroups(); - - // -> load in all the "GroupName.n" parameters if any - Enumeration names = request.getParameterNames(); - while( names.hasMoreElements()) { - String key = (String)names.nextElement(); - if (key.startsWith("GroupName")) { - String[] value = request.getParameterValues( key ); - if (null != value && 0 < value.length) EC2request.addGroupName( value[0] ); - } - } - - // -> are there any filters with this request? - EC2Filter[] filterSet = extractFilters( request ); - if (null != filterSet) { - EC2GroupFilterSet gfs = new EC2GroupFilterSet(); - for (EC2Filter filter : filterSet) gfs.addFilter( filter ); - EC2request.setFilterSet( gfs ); - } - - // -> execute the request - EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); - - DescribeSecurityGroupsResponse EC2response = EC2SoapServiceImpl.toDescribeSecurityGroupsResponse( engine.describeSecurityGroups( EC2request )); - serializeResponse(response, EC2response); - } - - - private void describeInstanceAttribute( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2DescribeInstances EC2request = new EC2DescribeInstances(); - String instanceType = null; - - // -> we are only handling queries about the "Attribute=instanceType" - Enumeration names = request.getParameterNames(); - while( names.hasMoreElements()) { - String key = (String)names.nextElement(); - if (key.startsWith("Attribute")) { - String[] value = request.getParameterValues( key ); - if (null != value && 0 < value.length && value[0].equalsIgnoreCase( "instanceType" )) { - instanceType = value[0]; - break; - } - } - } - if ( null != instanceType ) { - String[] value = request.getParameterValues( "InstanceId" ); - EC2request.addInstanceId( value[0] ); - } 
- else { - response.sendError(501, "Unsupported - only instanceType supported" ); - return; - } - - // -> execute the request - DescribeInstanceAttributeResponse EC2response = EC2SoapServiceImpl.toDescribeInstanceAttributeResponse( ServiceProvider.getInstance().getEC2Engine().describeInstances(EC2request)); - serializeResponse(response, EC2response); - } - - - private void describeSnapshots( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException - { - EC2DescribeSnapshots EC2request = new EC2DescribeSnapshots(); - - // -> load in all the "SnapshotId.n" parameters if any, and ignore any other parameters - Enumeration names = request.getParameterNames(); - while( names.hasMoreElements()) - { - String key = (String)names.nextElement(); - if (key.startsWith("SnapshotId")) { - String[] value = request.getParameterValues( key ); - if (null != value && 0 < value.length) EC2request.addSnapshotId( value[0] ); - } - } - - // -> are there any filters with this request? 
- EC2Filter[] filterSet = extractFilters( request ); - if (null != filterSet) - { - EC2SnapshotFilterSet sfs = new EC2SnapshotFilterSet(); - for( int i=0; i < filterSet.length; i++ ) sfs.addFilter( filterSet[i] ); - EC2request.setFilterSet( sfs ); - } - - // -> execute the request - EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); - DescribeSnapshotsResponse EC2response = EC2SoapServiceImpl.toDescribeSnapshotsResponse( engine.handleRequest( EC2request )); - serializeResponse(response, EC2response); - } - - - private void describeVolumes( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException - { - EC2DescribeVolumes EC2request = new EC2DescribeVolumes(); - - // -> load in all the "VolumeId.n" parameters if any - Enumeration names = request.getParameterNames(); - while( names.hasMoreElements()) - { - String key = (String)names.nextElement(); - if (key.startsWith("VolumeId")) - { - String[] value = request.getParameterValues( key ); - if (null != value && 0 < value.length) EC2request.addVolumeId( value[0] ); - } - } - - // -> are there any filters with this request? 
- EC2Filter[] filterSet = extractFilters( request ); - if (null != filterSet) - { - EC2VolumeFilterSet vfs = new EC2VolumeFilterSet(); - for( int i=0; i < filterSet.length; i++ ) vfs.addFilter( filterSet[i] ); - EC2request.setFilterSet( vfs ); - } - - // -> execute the request - DescribeVolumesResponse EC2response = EC2SoapServiceImpl.toDescribeVolumesResponse( ServiceProvider.getInstance().getEC2Engine().handleRequest( EC2request )); - serializeResponse(response, EC2response); - } - - - /** - * Example of how the filters are defined in a REST request: - * https:///?Action=DescribeVolumes - * &Filter.1.Name=attachment.instance-id - * &Filter.1.Value.1=i-1a2b3c4d - * &Filter.2.Name=attachment.delete-on-termination - * &Filter.2.Value.1=true - * - * @param request - * @return List - */ - private EC2Filter[] extractFilters( HttpServletRequest request ) - { - String filterName = null; - String value = null; - EC2Filter nextFilter = null; - boolean timeFilter = false; - int filterCount = 1; - int valueCount = 1; - - List filterSet = new ArrayList(); - - do - { filterName = request.getParameter( "Filter." + filterCount + ".Name" ); - if (null != filterName) - { - nextFilter = new EC2Filter(); - nextFilter.setName( filterName ); - timeFilter = (filterName.equalsIgnoreCase( "attachment.attach-time" ) || filterName.equalsIgnoreCase( "create-time" )); - valueCount = 1; - do - { - value = request.getParameter( "Filter." + filterCount + ".Value." 
+ valueCount ); - if (null != value) - { - // -> time values are not encoded as regexes - if ( timeFilter ) - nextFilter.addValue( value ); - else nextFilter.addValueEncoded( value ); - - valueCount++; - } - } - while( null != value ); - - filterSet.add( nextFilter ); - filterCount++; - } - } - while( null != filterName ); - - if ( 1 == filterCount ) - return null; - else return filterSet.toArray(new EC2Filter[0]); - } - - - private void describeKeyPairs(HttpServletRequest request, HttpServletResponse response) - throws ADBException, XMLStreamException, IOException { - EC2DescribeKeyPairs ec2Request = new EC2DescribeKeyPairs(); - - - String[] keyNames = request.getParameterValues( "KeyName" ); - if (keyNames != null) { - for (String keyName : keyNames) { - ec2Request.addKeyName(keyName); - } - } - EC2Filter[] filterSet = extractFilters( request ); - if (null != filterSet){ - EC2KeyPairFilterSet vfs = new EC2KeyPairFilterSet(); - for (EC2Filter filter : filterSet) { - vfs.addFilter(filter); - } - ec2Request.setKeyFilterSet(vfs); - } - - DescribeKeyPairsResponse EC2Response = EC2SoapServiceImpl.toDescribeKeyPairs( - ServiceProvider.getInstance().getEC2Engine().describeKeyPairs( ec2Request )); - serializeResponse(response, EC2Response); - } - - private void importKeyPair(HttpServletRequest request, HttpServletResponse response) - throws ADBException, XMLStreamException, IOException { - - String keyName = request.getParameter("KeyName"); - String publicKeyMaterial = request.getParameter("PublicKeyMaterial"); - if (keyName==null && publicKeyMaterial==null) { - response.sendError(530, "Missing parameter"); - return; - } - - if (!publicKeyMaterial.contains(" ")) - publicKeyMaterial = new String(Base64.decodeBase64(publicKeyMaterial.getBytes())); - - - - EC2ImportKeyPair ec2Request = new EC2ImportKeyPair(); - if (ec2Request != null) { - ec2Request.setKeyName(request.getParameter("KeyName")); - ec2Request.setPublicKeyMaterial(request.getParameter("PublicKeyMaterial")); - } 
- - ImportKeyPairResponse EC2Response = EC2SoapServiceImpl.toImportKeyPair( - ServiceProvider.getInstance().getEC2Engine().importKeyPair( ec2Request )); - serializeResponse(response, EC2Response); - } - - private void createKeyPair(HttpServletRequest request, HttpServletResponse response) - throws ADBException, XMLStreamException, IOException { - String keyName = request.getParameter("KeyName"); - if (keyName==null) { - response.sendError(530, "Missing KeyName parameter"); - return; - } - - EC2CreateKeyPair ec2Request = new EC2CreateKeyPair(); - if (ec2Request != null) { - ec2Request.setKeyName(keyName); - } - - CreateKeyPairResponse EC2Response = EC2SoapServiceImpl.toCreateKeyPair( - ServiceProvider.getInstance().getEC2Engine().createKeyPair(ec2Request)); - serializeResponse(response, EC2Response); - } - - private void deleteKeyPair(HttpServletRequest request, HttpServletResponse response) - throws ADBException, XMLStreamException, IOException { - String keyName = request.getParameter("KeyName"); - if (keyName==null) { - response.sendError(530, "Missing KeyName parameter"); - return; - } - - EC2DeleteKeyPair ec2Request = new EC2DeleteKeyPair(); - ec2Request.setKeyName(keyName); - - DeleteKeyPairResponse EC2Response = EC2SoapServiceImpl.toDeleteKeyPair( - ServiceProvider.getInstance().getEC2Engine().deleteKeyPair(ec2Request)); - serializeResponse(response, EC2Response); - } - - private void getPasswordData(HttpServletRequest request, HttpServletResponse response) - throws ADBException, XMLStreamException, IOException { - String instanceId = request.getParameter("InstanceId"); - if (instanceId==null) { - response.sendError(530, "Missing InstanceId parameter"); - return; - } - - GetPasswordDataResponse EC2Response = EC2SoapServiceImpl.toGetPasswordData( - ServiceProvider.getInstance().getEC2Engine().getPasswordData(instanceId)); - serializeResponse(response, EC2Response); - } - - /** - * This function implements the EC2 REST authentication algorithm. 
It uses the given - * "AWSAccessKeyId" parameter to look up the Cloud.com account holder's secret key which is - * used as input to the signature calculation. In addition, it tests the given "Expires" - * parameter to see if the signature has expired and if so the request fails. - */ - private boolean authenticateRequest( HttpServletRequest request, HttpServletResponse response ) - throws SignatureException, IOException, InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException, ParseException - { - String cloudSecretKey = null; - String cloudAccessKey = null; - String signature = null; - String sigMethod = null; - - // [A] Basic parameters required for an authenticated rest request - // -> note that the Servlet engine will un-URL encode all parameters we extract via "getParameterValues()" calls - String[] awsAccess = request.getParameterValues( "AWSAccessKeyId" ); - if ( null != awsAccess && 0 < awsAccess.length ) - cloudAccessKey = awsAccess[0]; - else { response.sendError(530, "Missing AWSAccessKeyId parameter" ); return false; } - - String[] clientSig = request.getParameterValues( "Signature" ); - if ( null != clientSig && 0 < clientSig.length ) - signature = clientSig[0]; - else { response.sendError(530, "Missing Signature parameter" ); return false; } - - String[] method = request.getParameterValues( "SignatureMethod" ); - if ( null != method && 0 < method.length ) - { - sigMethod = method[0]; - if (!sigMethod.equals( "HmacSHA256" ) && !sigMethod.equals( "HmacSHA1" )) { - response.sendError(531, "Unsupported SignatureMethod value: " + sigMethod + " expecting: HmacSHA256 or HmacSHA1" ); - return false; - } - } - else { response.sendError(530, "Missing SignatureMethod parameter" ); return false; } - - String[] version = request.getParameterValues( "Version" ); - if ( null != version && 0 < version.length ) - { - if (!version[0].equals( wsdlVersion )) { - response.sendError(531, "Unsupported Version value: " + version[0] + " expecting: 
" + wsdlVersion ); - return false; - } - } - else { response.sendError(530, "Missing Version parameter" ); return false; } - - String[] sigVersion = request.getParameterValues( "SignatureVersion" ); - if ( null != sigVersion && 0 < sigVersion.length ) - { - if (!sigVersion[0].equals( "2" )) { - response.sendError(531, "Unsupported SignatureVersion value: " + sigVersion[0] + " expecting: 2" ); - return false; - } - } - else { response.sendError(530, "Missing SignatureVersion parameter" ); return false; } - - // -> can have only one but not both { Expires | Timestamp } headers - String[] expires = request.getParameterValues( "Expires" ); - if ( null != expires && 0 < expires.length ) - { - // -> contains the date and time at which the signature included in the request EXPIRES - if (hasSignatureExpired( expires[0] )) { - response.sendError(531, "Expires parameter indicates signature has expired: " + expires[0] ); - return false; - } - } - else - { // -> contains the date and time at which the request is SIGNED - String[] time = request.getParameterValues( "Timestamp" ); - if ( null == time || 0 == time.length ) { - response.sendError(530, "Missing Timestamp and Expires parameter, one is required" ); - return false; - } - } - - // [B] Use the cloudAccessKey to get the users secret key in the db - UserCredentialsDao credentialDao = new UserCredentialsDao(); - UserCredentials cloudKeys = credentialDao.getByAccessKey( cloudAccessKey ); - if ( null == cloudKeys ) - { - logger.debug( cloudAccessKey + " is not defined in the EC2 service - call SetUserKeys" ); - response.sendError(404, cloudAccessKey + " is not defined in the EC2 service - call SetUserKeys" ); - return false; - } - else cloudSecretKey = cloudKeys.getSecretKey(); - - - // [C] Verify the signature - // -> getting the query-string in this way maintains its URL encoding - EC2RestAuth restAuth = new EC2RestAuth(); - restAuth.setHostHeader( request.getHeader( "Host" )); - String requestUri = 
request.getRequestURI(); - - //If forwarded from another basepath: - String forwardedPath = (String) request.getAttribute("javax.servlet.forward.request_uri"); - if(forwardedPath!=null){ - requestUri=forwardedPath; - } - restAuth.setHTTPRequestURI( requestUri); - restAuth.setQueryString( request.getQueryString()); - - if ( restAuth.verifySignature( request.getMethod(), cloudSecretKey, signature, sigMethod )) { - UserContext.current().initContext( cloudAccessKey, cloudSecretKey, cloudAccessKey, "REST request", null ); - return true; - } - else throw new PermissionDeniedException("Invalid signature"); - } - - /** - * We check this to reduce replay attacks. - * - * @param timeStamp - * @return true - if the request is not longer valid, false otherwise - * @throws ParseException - */ - private boolean hasSignatureExpired( String timeStamp ) { - Calendar cal = EC2RestAuth.parseDateString( timeStamp ); - if (null == cal) return false; - - Date expiredTime = cal.getTime(); - Date today = new Date(); // -> gets set to time of creation - if ( 0 >= expiredTime.compareTo( today )) { - logger.debug( "timestamp given: [" + timeStamp + "], now: [" + today.toString() + "]" ); - return true; - } - else return false; - } - - private static void endResponse(HttpServletResponse response, String content) { - try { - byte[] data = content.getBytes(); - response.setContentLength(data.length); - OutputStream os = response.getOutputStream(); - os.write(data); - os.close(); - - } catch(Throwable e) { - logger.error("Unexpected exception " + e.getMessage(), e); - } - } - - private void logRequest(HttpServletRequest request) { - if(logger.isInfoEnabled()) { - logger.info("EC2 Request method: " + request.getMethod()); - logger.info("Request contextPath: " + request.getContextPath()); - logger.info("Request pathInfo: " + request.getPathInfo()); - logger.info("Request pathTranslated: " + request.getPathTranslated()); - logger.info("Request queryString: " + request.getQueryString()); - 
logger.info("Request requestURI: " + request.getRequestURI()); - logger.info("Request requestURL: " + request.getRequestURL()); - logger.info("Request servletPath: " + request.getServletPath()); - Enumeration headers = request.getHeaderNames(); - if(headers != null) { - while(headers.hasMoreElements()) { - Object headerName = headers.nextElement(); - logger.info("Request header " + headerName + ":" + request.getHeader((String)headerName)); - } - } - - Enumeration params = request.getParameterNames(); - if(params != null) { - while(params.hasMoreElements()) { - Object paramName = params.nextElement(); - logger.info("Request parameter " + paramName + ":" + - request.getParameter((String)paramName)); - } - } - } - } - - /** - * Send out an error response according to Amazon convention. - */ - private void faultResponse(HttpServletResponse response, String errorCode, String errorMessage) { - try { - OutputStreamWriter out = new OutputStreamWriter(response.getOutputStream()); - response.setContentType("text/xml; charset=UTF-8"); - out.write(""); - out.write(""); - out.write(errorCode); - out.write(""); - out.write(errorMessage); - out.write(""); - out.write(UUID.randomUUID().toString()); - out.write(""); - out.flush(); - out.close(); - } catch (IOException e) { - logger.error("Unexpected exception " + e.getMessage(), e); - } - } - - /** - * Serialize Axis beans to XML output. 
- */ - private void serializeResponse(HttpServletResponse response, ADBBean EC2Response) - throws ADBException, XMLStreamException, IOException { - OutputStream os = response.getOutputStream(); - response.setStatus(200); - response.setContentType("text/xml; charset=UTF-8"); - XMLStreamWriter xmlWriter = xmlOutFactory.createXMLStreamWriter( os ); - MTOMAwareXMLSerializer MTOMWriter = new MTOMAwareXMLSerializer( xmlWriter ); - MTOMWriter.setDefaultNamespace("http://ec2.amazonaws.com/doc/" + wsdlVersion + "/"); - EC2Response.serialize( null, factory, MTOMWriter ); - xmlWriter.flush(); - xmlWriter.close(); - os.close(); - } -} +/* + * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.cloud.bridge.service; + +import java.io.ByteArrayInputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.io.OutputStreamWriter; +import java.security.KeyStore; +import java.security.SignatureException; +import java.security.cert.Certificate; +import java.security.cert.CertificateFactory; +import java.sql.SQLException; +import java.text.ParseException; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.Date; +import java.util.Enumeration; +import java.util.List; +import java.util.Properties; +import java.util.UUID; + +import javax.servlet.ServletConfig; +import javax.servlet.ServletException; +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import javax.xml.stream.XMLOutputFactory; +import javax.xml.stream.XMLStreamException; +import javax.xml.stream.XMLStreamWriter; + +import org.apache.axiom.om.OMAbstractFactory; +import org.apache.axiom.om.OMFactory; +import org.apache.axis2.AxisFault; +import org.apache.axis2.databinding.ADBBean; +import org.apache.axis2.databinding.ADBException; +import org.apache.axis2.databinding.utils.writer.MTOMAwareXMLSerializer; +import org.apache.commons.codec.binary.Base64; +import org.apache.log4j.Logger; + +import com.amazon.ec2.AllocateAddressResponse; +import com.amazon.ec2.AssociateAddressResponse; +import com.amazon.ec2.AttachVolumeResponse; +import com.amazon.ec2.AuthorizeSecurityGroupIngressResponse; +import com.amazon.ec2.CreateImageResponse; +import com.amazon.ec2.CreateKeyPairResponse; +import com.amazon.ec2.CreateSecurityGroupResponse; +import com.amazon.ec2.CreateSnapshotResponse; +import com.amazon.ec2.CreateVolumeResponse; +import com.amazon.ec2.DeleteKeyPairResponse; +import com.amazon.ec2.DeleteSecurityGroupResponse; +import 
com.amazon.ec2.DeleteSnapshotResponse; +import com.amazon.ec2.DeleteVolumeResponse; +import com.amazon.ec2.DeregisterImageResponse; +import com.amazon.ec2.DescribeAvailabilityZonesResponse; +import com.amazon.ec2.DescribeImageAttributeResponse; +import com.amazon.ec2.DescribeImagesResponse; +import com.amazon.ec2.DescribeInstanceAttributeResponse; +import com.amazon.ec2.DescribeInstancesResponse; +import com.amazon.ec2.DescribeKeyPairsResponse; +import com.amazon.ec2.DescribeSecurityGroupsResponse; +import com.amazon.ec2.DescribeSnapshotsResponse; +import com.amazon.ec2.DescribeVolumesResponse; +import com.amazon.ec2.DetachVolumeResponse; +import com.amazon.ec2.DisassociateAddressResponse; +import com.amazon.ec2.GetPasswordDataResponse; +import com.amazon.ec2.ImportKeyPairResponse; +import com.amazon.ec2.ModifyImageAttributeResponse; +import com.amazon.ec2.RebootInstancesResponse; +import com.amazon.ec2.RegisterImageResponse; +import com.amazon.ec2.ReleaseAddressResponse; +import com.amazon.ec2.ResetImageAttributeResponse; +import com.amazon.ec2.RevokeSecurityGroupIngressResponse; +import com.amazon.ec2.RunInstancesResponse; +import com.amazon.ec2.StartInstancesResponse; +import com.amazon.ec2.StopInstancesResponse; +import com.amazon.ec2.TerminateInstancesResponse; +import com.cloud.bridge.model.UserCredentials; +import com.cloud.bridge.persist.PersistContext; +import com.cloud.bridge.persist.dao.OfferingDao; +import com.cloud.bridge.persist.dao.UserCredentialsDao; +import com.cloud.bridge.service.core.ec2.EC2AssociateAddress; +import com.cloud.bridge.service.core.ec2.EC2AuthorizeRevokeSecurityGroup; +import com.cloud.bridge.service.core.ec2.EC2CreateImage; +import com.cloud.bridge.service.core.ec2.EC2CreateKeyPair; +import com.cloud.bridge.service.core.ec2.EC2CreateVolume; +import com.cloud.bridge.service.core.ec2.EC2DeleteKeyPair; +import com.cloud.bridge.service.core.ec2.EC2DescribeAddresses; +import 
com.cloud.bridge.service.core.ec2.EC2DescribeAvailabilityZones; +import com.cloud.bridge.service.core.ec2.EC2DescribeImages; +import com.cloud.bridge.service.core.ec2.EC2DescribeInstances; +import com.cloud.bridge.service.core.ec2.EC2DescribeKeyPairs; +import com.cloud.bridge.service.core.ec2.EC2DescribeSecurityGroups; +import com.cloud.bridge.service.core.ec2.EC2DescribeSnapshots; +import com.cloud.bridge.service.core.ec2.EC2DescribeVolumes; +import com.cloud.bridge.service.core.ec2.EC2DisassociateAddress; +import com.cloud.bridge.service.core.ec2.EC2Engine; +import com.cloud.bridge.service.core.ec2.EC2Filter; +import com.cloud.bridge.service.core.ec2.EC2GroupFilterSet; +import com.cloud.bridge.service.core.ec2.EC2Image; +import com.cloud.bridge.service.core.ec2.EC2ImportKeyPair; +import com.cloud.bridge.service.core.ec2.EC2InstanceFilterSet; +import com.cloud.bridge.service.core.ec2.EC2IpPermission; +import com.cloud.bridge.service.core.ec2.EC2KeyPairFilterSet; +import com.cloud.bridge.service.core.ec2.EC2RebootInstances; +import com.cloud.bridge.service.core.ec2.EC2RegisterImage; +import com.cloud.bridge.service.core.ec2.EC2ReleaseAddress; +import com.cloud.bridge.service.core.ec2.EC2RunInstances; +import com.cloud.bridge.service.core.ec2.EC2SecurityGroup; +import com.cloud.bridge.service.core.ec2.EC2SnapshotFilterSet; +import com.cloud.bridge.service.core.ec2.EC2StartInstances; +import com.cloud.bridge.service.core.ec2.EC2StopInstances; +import com.cloud.bridge.service.core.ec2.EC2Volume; +import com.cloud.bridge.service.core.ec2.EC2VolumeFilterSet; +import com.cloud.bridge.service.exception.EC2ServiceException; +import com.cloud.bridge.service.exception.NoSuchObjectException; +import com.cloud.bridge.service.exception.PermissionDeniedException; +import com.cloud.bridge.service.exception.EC2ServiceException.ClientError; +import com.cloud.bridge.util.AuthenticationUtils; +import com.cloud.bridge.util.ConfigurationHelper; +import 
com.cloud.bridge.util.EC2RestAuth; +import com.cloud.stack.models.CloudStackAccount; + + +public class EC2RestServlet extends HttpServlet { + + private static final long serialVersionUID = -6168996266762804888L; + + public static final Logger logger = Logger.getLogger(EC2RestServlet.class); + + private OMFactory factory = OMAbstractFactory.getOMFactory(); + private XMLOutputFactory xmlOutFactory = XMLOutputFactory.newInstance(); + + private String pathToKeystore = null; + private String keystorePassword = null; + private String wsdlVersion = null; + private String version = null; + + boolean debug=true; + + + /** + * We build the path to where the keystore holding the WS-Security X509 certificates + * are stored. + */ + @Override + public void init( ServletConfig config ) throws ServletException { + File propertiesFile = ConfigurationHelper.findConfigurationFile("ec2-service.properties"); + Properties EC2Prop = null; + + if (null != propertiesFile) { + logger.info("Use EC2 properties file: " + propertiesFile.getAbsolutePath()); + EC2Prop = new Properties(); + try { + EC2Prop.load( new FileInputStream( propertiesFile )); + } catch (FileNotFoundException e) { + logger.warn("Unable to open properties file: " + propertiesFile.getAbsolutePath(), e); + } catch (IOException e) { + logger.warn("Unable to read properties file: " + propertiesFile.getAbsolutePath(), e); + } + String keystore = EC2Prop.getProperty( "keystore" ); + keystorePassword = EC2Prop.getProperty( "keystorePass" ); + wsdlVersion = EC2Prop.getProperty( "WSDLVersion", "2009-11-30" ); + version = EC2Prop.getProperty( "cloudbridgeVersion", "UNKNOWN VERSION" ); + + String installedPath = System.getenv("CATALINA_HOME"); + if (installedPath == null) installedPath = System.getenv("CATALINA_BASE"); + if (installedPath == null) installedPath = System.getProperty("catalina.home"); + String webappPath = config.getServletContext().getRealPath("/"); + //pathToKeystore = new String( installedPath + File.separator + 
"webapps" + File.separator + webappName + File.separator + "WEB-INF" + File.separator + "classes" + File.separator + keystore ); + pathToKeystore = new String( webappPath + "WEB-INF" + File.separator + "classes" + File.separator + keystore ); + } + } + + @Override + protected void doGet(HttpServletRequest req, HttpServletResponse resp) { + doGetOrPost(req, resp); + } + + @Override + protected void doPost(HttpServletRequest req, HttpServletResponse resp) { + doGetOrPost(req, resp); + } + + protected void doGetOrPost(HttpServletRequest request, HttpServletResponse response) { + + if(debug){ + System.out.println("EC2RestServlet.doGetOrPost: javax.servlet.forward.request_uri: "+request.getAttribute("javax.servlet.forward.request_uri")); + System.out.println("EC2RestServlet.doGetOrPost: javax.servlet.forward.context_path: "+request.getAttribute("javax.servlet.forward.context_path")); + System.out.println("EC2RestServlet.doGetOrPost: javax.servlet.forward.servlet_path: "+request.getAttribute("javax.servlet.forward.servlet_path")); + System.out.println("EC2RestServlet.doGetOrPost: javax.servlet.forward.path_info: "+request.getAttribute("javax.servlet.forward.path_info")); + System.out.println("EC2RestServlet.doGetOrPost: javax.servlet.forward.query_string: "+request.getAttribute("javax.servlet.forward.query_string")); + + } + + + String action = request.getParameter( "Action" ); + logRequest(request); + + // -> unauthenticated calls, should still be done over HTTPS + if (action.equalsIgnoreCase( "SetUserKeys" )) { + setUserKeys(request, response); + return; + } + + if (action.equalsIgnoreCase( "CloudEC2Version" )) { + cloudEC2Version(request, response); + return; + } + + // -> authenticated calls + try { + if (!authenticateRequest( request, response )) return; + + if (action.equalsIgnoreCase( "AllocateAddress" )) allocateAddress(request, response); + else if (action.equalsIgnoreCase( "AssociateAddress" )) associateAddress(request, response); + else if 
(action.equalsIgnoreCase( "AttachVolume" )) attachVolume(request, response ); + else if (action.equalsIgnoreCase( "AuthorizeSecurityGroupIngress" )) authorizeSecurityGroupIngress(request, response); + else if (action.equalsIgnoreCase( "CreateImage" )) createImage(request, response); + else if (action.equalsIgnoreCase( "CreateSecurityGroup" )) createSecurityGroup(request, response); + else if (action.equalsIgnoreCase( "CreateSnapshot" )) createSnapshot(request, response); + else if (action.equalsIgnoreCase( "CreateVolume" )) createVolume(request, response); + else if (action.equalsIgnoreCase( "DeleteSecurityGroup" )) deleteSecurityGroup(request, response); + else if (action.equalsIgnoreCase( "DeleteSnapshot" )) deleteSnapshot(request, response); + else if (action.equalsIgnoreCase( "DeleteVolume" )) deleteVolume(request, response); + else if (action.equalsIgnoreCase( "DeregisterImage" )) deregisterImage(request, response); + else if (action.equalsIgnoreCase( "DescribeAddresses" )) describeAddresses(request, response); + else if (action.equalsIgnoreCase( "DescribeAvailabilityZones" )) describeAvailabilityZones(request, response); + else if (action.equalsIgnoreCase( "DescribeImageAttribute" )) describeImageAttribute(request, response); + else if (action.equalsIgnoreCase( "DescribeImages" )) describeImages(request, response); + else if (action.equalsIgnoreCase( "DescribeInstanceAttribute" )) describeInstanceAttribute(request, response); + else if (action.equalsIgnoreCase( "DescribeInstances" )) describeInstances(request, response); + else if (action.equalsIgnoreCase( "DescribeSecurityGroups" )) describeSecurityGroups(request, response); + else if (action.equalsIgnoreCase( "DescribeSnapshots" )) describeSnapshots(request, response); + else if (action.equalsIgnoreCase( "DescribeVolumes" )) describeVolumes(request, response); + else if (action.equalsIgnoreCase( "DetachVolume" )) detachVolume(request, response); + else if (action.equalsIgnoreCase( "DisassociateAddress" )) 
disassociateAddress(request, response); + else if (action.equalsIgnoreCase( "ModifyImageAttribute" )) modifyImageAttribute(request, response); + else if (action.equalsIgnoreCase( "RebootInstances" )) rebootInstances(request, response); + else if (action.equalsIgnoreCase( "RegisterImage" )) registerImage(request, response); + else if (action.equalsIgnoreCase( "ReleaseAddress" )) releaseAddress(request, response); + else if (action.equalsIgnoreCase( "ResetImageAttribute" )) resetImageAttribute(request, response); + else if (action.equalsIgnoreCase( "RevokeSecurityGroupIngress")) revokeSecurityGroupIngress(request, response); + else if (action.equalsIgnoreCase( "RunInstances" )) runInstances(request, response); + else if (action.equalsIgnoreCase( "StartInstances" )) startInstances(request, response); + else if (action.equalsIgnoreCase( "StopInstances" )) stopInstances(request, response); + else if (action.equalsIgnoreCase( "TerminateInstances" )) terminateInstances(request, response); + else if (action.equalsIgnoreCase( "SetCertificate" )) setCertificate(request, response); + else if (action.equalsIgnoreCase( "DeleteCertificate" )) deleteCertificate(request, response); + else if (action.equalsIgnoreCase( "SetOfferMapping" )) setOfferMapping(request, response); + else if (action.equalsIgnoreCase( "DeleteOfferMapping" )) deleteOfferMapping(request, response); + else if (action.equalsIgnoreCase( "CreateKeyPair" )) createKeyPair(request, response); + else if (action.equalsIgnoreCase( "ImportKeyPair" )) importKeyPair(request, response); + else if (action.equalsIgnoreCase( "DeleteKeyPair" )) deleteKeyPair(request, response); + else if (action.equalsIgnoreCase( "DescribeKeyPairs" )) describeKeyPairs(request, response); + else if (action.equalsIgnoreCase( "GetPasswordData" )) getPasswordData(request, response); + else { + logger.error("Unsupported action " + action); + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + 
PersistContext.commitTransaction(); + PersistContext.commitTransaction(true); + + } catch( EC2ServiceException e ) { + response.setStatus(e.getErrorCode()); + + if (e.getCause() != null && e.getCause() instanceof AxisFault) + faultResponse(response, ((AxisFault)e.getCause()).getFaultCode().getLocalPart(), e.getMessage()); + else { + logger.error("EC2ServiceException: " + e.getMessage(), e); + endResponse(response, e.toString()); + } + } catch( PermissionDeniedException e ) { + logger.error("Unexpected exception: " + e.getMessage(), e); + response.setStatus(403); + endResponse(response, "Access denied"); + + } catch( Exception e ) { + logger.error("Unexpected exception: " + e.getMessage(), e); + response.setStatus(500); + endResponse(response, e.toString()); + + } finally { + try { + response.flushBuffer(); + } catch (IOException e) { + logger.error("Unexpected exception " + e.getMessage(), e); + } + PersistContext.closeSession(); + PersistContext.closeSession(true); + } + } + + /** + * Provide an easy way to determine the version of the implementation running. + * + * This is an unauthenticated REST call. + */ + private void cloudEC2Version( HttpServletRequest request, HttpServletResponse response ) { + String version_response = new String( "" + version + "" ); + response.setStatus(200); + endResponse(response, version_response); + } + + /** + * This request registers the Cloud.com account holder to the EC2 service. The Cloud.com + * account holder saves his API access and secret keys with the EC2 service so that + * the EC2 service can make Cloud.com API calls on his behalf. The given API access + * and secret key are saved into the "usercredentials" database table. + * + * This is an unauthenticated REST call. The only required parameters are 'accesskey' and + * 'secretkey'. + * + * To verify that the given keys represent an existing account they are used to execute the + * Cloud.com's listAccounts API function. 
If the keys do not represent a valid account the + * listAccounts function will fail. + * + * A user can call this REST function any number of times, on each call the Cloud.com secret + * key is simply over writes any previously stored value. + * + * As with all REST calls HTTPS should be used to ensure their security. + */ + private void setUserKeys( HttpServletRequest request, HttpServletResponse response ) { + String[] accessKey = null; + String[] secretKey = null; + + try { + // -> all these parameters are required + accessKey = request.getParameterValues( "accesskey" ); + if ( null == accessKey || 0 == accessKey.length ) { + response.sendError(530, "Missing accesskey parameter" ); + return; + } + + secretKey = request.getParameterValues( "secretkey" ); + if ( null == secretKey || 0 == secretKey.length ) { + response.sendError(530, "Missing secretkey parameter" ); + return; + } + } catch( Exception e ) { + logger.error("SetUserKeys exception " + e.getMessage(), e); + response.setStatus(500); + endResponse(response, "SetUserKeys exception " + e.getMessage()); + return; + } + + // prime UserContext here +// logger.debug("initializing context"); + UserContext context = UserContext.current(); + + try { + // -> use the keys to see if the account actually exists + ServiceProvider.getInstance().getEC2Engine().validateAccount( accessKey[0], secretKey[0] ); + UserCredentialsDao credentialDao = new UserCredentialsDao(); + credentialDao.setUserKeys( accessKey[0], secretKey[0] ); + + } catch( Exception e ) { + logger.error("SetUserKeys " + e.getMessage(), e); + response.setStatus(401); + endResponse(response, e.toString()); + return; + } + response.setStatus(200); + endResponse(response, "User keys set successfully"); + } + + /** + * The SOAP API for EC2 uses WS-Security to sign all client requests. This requires that + * the client have a public/private key pair and the public key defined by a X509 certificate. 
+ * Thus in order for a Cloud.com account holder to use the EC2's SOAP API he must register + * his X509 certificate with the EC2 service. This function allows the Cloud.com account + * holder to "load" his X509 certificate into the service. Note, that the SetUserKeys REST + * function must be called before this call. + * + * This is an authenticated REST call and as such must contain all the required REST parameters + * including: Signature, Timestamp, Expires, etc. The signature is calculated using the + * Cloud.com account holder's API access and secret keys and the Amazon defined EC2 signature + * algorithm. + * + * A user can call this REST function any number of times, on each call the X509 certificate + * simply over writes any previously stored value. + */ + private void setCertificate( HttpServletRequest request, HttpServletResponse response ) + throws Exception { + try { + // [A] Pull the cert and cloud AccessKey from the request + String[] certificate = request.getParameterValues( "cert" ); + if (null == certificate || 0 == certificate.length) { + response.sendError(530, "Missing cert parameter" ); + return; + } +// logger.debug( "SetCertificate cert: [" + certificate[0] + "]" ); + + String [] accessKey = request.getParameterValues( "AWSAccessKeyId" ); + if ( null == accessKey || 0 == accessKey.length ) { + response.sendError(530, "Missing AWSAccessKeyId parameter" ); + return; + } + + // [B] Open our keystore + FileInputStream fsIn = new FileInputStream( pathToKeystore ); + KeyStore certStore = KeyStore.getInstance( "JKS" ); + certStore.load( fsIn, keystorePassword.toCharArray()); + + // -> use the Cloud API key to save the cert in the keystore + // -> write the cert into the keystore on disk + Certificate userCert = null; + CertificateFactory cf = CertificateFactory.getInstance( "X.509" ); + + ByteArrayInputStream bs = new ByteArrayInputStream( certificate[0].getBytes()); + while (bs.available() > 0) userCert = cf.generateCertificate(bs); + 
certStore.setCertificateEntry( accessKey[0], userCert ); + + FileOutputStream fsOut = new FileOutputStream( pathToKeystore ); + certStore.store( fsOut, keystorePassword.toCharArray()); + + // [C] Associate the cert's uniqueId with the Cloud API keys + String uniqueId = AuthenticationUtils.X509CertUniqueId( userCert ); + logger.debug( "SetCertificate, uniqueId: " + uniqueId ); + UserCredentialsDao credentialDao = new UserCredentialsDao(); + credentialDao.setCertificateId( accessKey[0], uniqueId ); + response.setStatus(200); + endResponse(response, "User certificate set successfully"); + + } catch( NoSuchObjectException e ) { + logger.error("SetCertificate exception " + e.getMessage(), e); + response.sendError(404, "SetCertificate exception " + e.getMessage()); + + } catch( Exception e ) { + logger.error("SetCertificate exception " + e.getMessage(), e); + response.sendError(500, "SetCertificate exception " + e.getMessage()); + } + } + + /** + * The SOAP API for EC2 uses WS-Security to sign all client requests. This requires that + * the client have a public/private key pair and the public key defined by a X509 certificate. + * This REST call allows a Cloud.com account holder to remove a previouly "loaded" X509 + * certificate out of the EC2 service. + * + * This is an unauthenticated REST call and as such must contain all the required REST parameters + * including: Signature, Timestamp, Expires, etc. The signature is calculated using the + * Cloud.com account holder's API access and secret keys and the Amazon defined EC2 signature + * algorithm. 
+ */ + private void deleteCertificate( HttpServletRequest request, HttpServletResponse response ) + throws Exception { + try { + String [] accessKey = request.getParameterValues( "AWSAccessKeyId" ); + if ( null == accessKey || 0 == accessKey.length ) { + response.sendError(530, "Missing AWSAccessKeyId parameter" ); + return; + } + + // -> delete the specified entry and save back to disk + FileInputStream fsIn = new FileInputStream( pathToKeystore ); + KeyStore certStore = KeyStore.getInstance( "JKS" ); + certStore.load( fsIn, keystorePassword.toCharArray()); + + if ( certStore.containsAlias( accessKey[0] )) { + certStore.deleteEntry( accessKey[0] ); + FileOutputStream fsOut = new FileOutputStream( pathToKeystore ); + certStore.store( fsOut, keystorePassword.toCharArray()); + + // -> dis-associate the cert's uniqueId with the Cloud API keys + UserCredentialsDao credentialDao = new UserCredentialsDao(); + credentialDao.setCertificateId( accessKey[0], null ); + response.setStatus(200); + endResponse(response, "User certificate deleted successfully"); + } + else response.setStatus(404); + + } catch( NoSuchObjectException e ) { + logger.error("SetCertificate exception " + e.getMessage(), e); + response.sendError(404, "SetCertificate exception " + e.getMessage()); + + } catch( Exception e ) { + logger.error("DeleteCertificate exception " + e.getMessage(), e); + response.sendError(500, "DeleteCertificate exception " + e.getMessage()); + } + } + + /** + * Allow the caller to define the mapping between the Amazon instance type strings + * (e.g., m1.small, cc1.4xlarge) and the cloudstack service offering ids. Setting + * an existing mapping just over writes the prevous values. 
+ */ + private void setOfferMapping( HttpServletRequest request, HttpServletResponse response ) { + String amazonOffer = null; + String cloudOffer = null; + + try { + // -> all these parameters are required + amazonOffer = request.getParameter( "amazonoffer" ); + if ( null == amazonOffer ) { + response.sendError(530, "Missing amazonoffer parameter" ); + return; + } + + cloudOffer = request.getParameter( "cloudoffer" ); + if ( null == cloudOffer ) { + response.sendError(530, "Missing cloudoffer parameter" ); + return; + } + } catch( Exception e ) { + logger.error("SetOfferMapping exception " + e.getMessage(), e); + response.setStatus(500); + endResponse(response, "SetOfferMapping exception " + e.getMessage()); + return; + } + + // validate account is admin level + try { + CloudStackAccount currentAccount = ServiceProvider.getInstance().getEC2Engine().getCurrentAccount(); + + if (currentAccount.getAccountType() != 1) { + logger.debug("SetOfferMapping called by non-admin user!"); + response.setStatus(500); + endResponse(response, "Permission denied for non-admin user to setOfferMapping!"); + return; + } + } catch (Exception e) { + logger.error("SetOfferMapping " + e.getMessage(), e); + response.setStatus(401); + endResponse(response, e.toString()); + return; + } + + try { + OfferingDao ofDao = new OfferingDao(); + ofDao.setOfferMapping( amazonOffer, cloudOffer ); + + } catch( Exception e ) { + logger.error("SetOfferMapping " + e.getMessage(), e); + response.setStatus(401); + endResponse(response, e.toString()); + return; + } + response.setStatus(200); + endResponse(response, "offering mapping set successfully"); + } + + private void deleteOfferMapping( HttpServletRequest request, HttpServletResponse response ) { + String amazonOffer = null; + + try { + // -> all these parameters are required + amazonOffer = request.getParameter( "amazonoffer" ); + if ( null == amazonOffer ) { + response.sendError(530, "Missing amazonoffer parameter" ); + return; + } + + } catch( 
Exception e ) { + logger.error("DeleteOfferMapping exception " + e.getMessage(), e); + response.setStatus(500); + endResponse(response, "DeleteOfferMapping exception " + e.getMessage()); + return; + } + + // validate account is admin level + try { + CloudStackAccount currentAccount = ServiceProvider.getInstance().getEC2Engine().getCurrentAccount(); + + if (currentAccount.getAccountType() != 1) { + logger.debug("deleteOfferMapping called by non-admin user!"); + response.setStatus(500); + endResponse(response, "Permission denied for non-admin user to deleteOfferMapping!"); + return; + } + } catch (Exception e) { + logger.error("deleteOfferMapping " + e.getMessage(), e); + response.setStatus(401); + endResponse(response, e.toString()); + return; + } + + try { + OfferingDao ofDao = new OfferingDao(); + ofDao.deleteOfferMapping( amazonOffer ); + + } catch( Exception e ) { + logger.error("DeleteOfferMapping " + e.getMessage(), e); + response.setStatus(401); + endResponse(response, e.toString()); + return; + } + response.setStatus(200); + endResponse(response, "offering mapping deleted successfully"); + } + + /** + * The approach taken here is to map these REST calls into the same objects used + * to implement the matching SOAP requests (e.g., AttachVolume). This is done by parsing + * out the URL parameters and loading them into the relevant EC2XXX object(s). Once + * the parameters are loaded the appropriate EC2Engine function is called to perform + * the requested action. The result of the EC2Engine function is a standard + * Amazon WSDL defined object (e.g., AttachVolumeResponse Java object). Finally the + * serialize method is called on the returned response object to obtain the extected + * response XML. 
+ */ + private void attachVolume( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException { + EC2Volume EC2request = new EC2Volume(); + + // -> all these parameters are required + String[] volumeId = request.getParameterValues( "VolumeId" ); + if ( null != volumeId && 0 < volumeId.length ) + EC2request.setId( volumeId[0] ); + else { response.sendError(530, "Missing VolumeId parameter" ); return; } + + String[] instanceId = request.getParameterValues( "InstanceId" ); + if ( null != instanceId && 0 < instanceId.length ) + EC2request.setInstanceId( instanceId[0] ); + else { response.sendError(530, "Missing InstanceId parameter" ); return; } + + String[] device = request.getParameterValues( "Device" ); + if ( null != device && 0 < device.length ) + EC2request.setDevice( device[0] ); + else { response.sendError(530, "Missing Device parameter" ); return; } + + // -> execute the request + AttachVolumeResponse EC2response = EC2SoapServiceImpl.toAttachVolumeResponse( ServiceProvider.getInstance().getEC2Engine().attachVolume( EC2request )); + serializeResponse(response, EC2response); + } + + /** + * The SOAP equivalent of this function appears to allow multiple permissions per request, yet + * in the REST API documentation only one permission is allowed. 
+ */ + private void revokeSecurityGroupIngress( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException { + EC2AuthorizeRevokeSecurityGroup EC2request = new EC2AuthorizeRevokeSecurityGroup(); + + String[] groupName = request.getParameterValues( "GroupName" ); + if ( null != groupName && 0 < groupName.length ) + EC2request.setName( groupName[0] ); + else { response.sendError(530, "Missing GroupName parameter" ); return; } + + EC2IpPermission perm = new EC2IpPermission(); + + String[] protocol = request.getParameterValues( "IpProtocol" ); + if ( null != protocol && 0 < protocol.length ) + perm.setProtocol( protocol[0] ); + else { response.sendError(530, "Missing IpProtocol parameter" ); return; } + + String[] fromPort = request.getParameterValues( "FromPort" ); + if ( null != fromPort && 0 < fromPort.length ) + perm.setProtocol( fromPort[0] ); + else { response.sendError(530, "Missing FromPort parameter" ); return; } + + String[] toPort = request.getParameterValues( "ToPort" ); + if ( null != toPort && 0 < toPort.length ) + perm.setProtocol( toPort[0] ); + else { response.sendError(530, "Missing ToPort parameter" ); return; } + + String[] ranges = request.getParameterValues( "CidrIp" ); + if ( null != ranges && 0 < ranges.length) + perm.addIpRange( ranges[0] ); + else { response.sendError(530, "Missing CidrIp parameter" ); return; } + + String[] user = request.getParameterValues( "SourceSecurityGroupOwnerId" ); + if ( null == user || 0 == user.length) { + response.sendError(530, "Missing SourceSecurityGroupOwnerId parameter" ); + return; + } + + String[] name = request.getParameterValues( "SourceSecurityGroupName" ); + if ( null == name || 0 == name.length) { + response.sendError(530, "Missing SourceSecurityGroupName parameter" ); + return; + } + + EC2SecurityGroup group = new EC2SecurityGroup(); + group.setAccount( user[0] ); + group.setName( name[0] ); + perm.addUser( group ); + EC2request.addIpPermission( 
perm ); + + // -> execute the request + RevokeSecurityGroupIngressResponse EC2response = EC2SoapServiceImpl.toRevokeSecurityGroupIngressResponse( + ServiceProvider.getInstance().getEC2Engine().revokeSecurityGroup( EC2request )); + serializeResponse(response, EC2response); + } + + private void authorizeSecurityGroupIngress( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException { + // -> parse the complicated paramters into our standard object + EC2AuthorizeRevokeSecurityGroup EC2request = new EC2AuthorizeRevokeSecurityGroup(); + + String[] groupName = request.getParameterValues( "GroupName" ); + if ( null != groupName && 0 < groupName.length ) + EC2request.setName( groupName[0] ); + else { response.sendError(530, "Missing GroupName parameter" ); return; } + + // -> not clear how many parameters there are until we fail to get IpPermissions.n.IpProtocol + int nCount = 1; + do + { EC2IpPermission perm = new EC2IpPermission(); + + String[] protocol = request.getParameterValues( "IpPermissions." + nCount + ".IpProtocol" ); + if ( null != protocol && 0 < protocol.length ) + perm.setProtocol( protocol[0] ); + else break; + + String[] fromPort = request.getParameterValues( "IpPermissions." + nCount + ".FromPort" ); + if (null != fromPort && 0 < fromPort.length) perm.setProtocol( fromPort[0] ); + + String[] toPort = request.getParameterValues( "IpPermissions." + nCount + ".ToPort" ); + if (null != toPort && 0 < toPort.length) perm.setProtocol( toPort[0] ); + + // -> list: IpPermissions.n.IpRanges.m.CidrIp + int mCount = 1; + do + { String[] ranges = request.getParameterValues( "IpPermissions." + nCount + ".IpRanges." 
+ mCount + ".CidrIp" ); + if ( null != ranges && 0 < ranges.length) + perm.addIpRange( ranges[0] ); + else break; + mCount++; + + } while( true ); + + // -> list: IpPermissions.n.Groups.m.UserId and IpPermissions.n.Groups.m.GroupName + mCount = 1; + do + { String[] user = request.getParameterValues( "IpPermissions." + nCount + ".Groups." + mCount + ".UserId" ); + if ( null == user || 0 == user.length) break; + + String[] name = request.getParameterValues( "IpPermissions." + nCount + ".Groups." + mCount + ".GroupName" ); + if ( null == name || 0 == name.length) break; + + EC2SecurityGroup group = new EC2SecurityGroup(); + group.setAccount( user[0] ); + group.setName( name[0] ); + perm.addUser( group ); + mCount++; + + } while( true ); + + // -> multiple IP permissions can be specified per group name + EC2request.addIpPermission( perm ); + nCount++; + + } while( true ); + + if (1 == nCount) { response.sendError(530, "At least one IpPermissions required" ); return; } + + + // -> execute the request + AuthorizeSecurityGroupIngressResponse EC2response = EC2SoapServiceImpl.toAuthorizeSecurityGroupIngressResponse( + ServiceProvider.getInstance().getEC2Engine().authorizeSecurityGroup( EC2request )); + serializeResponse(response, EC2response); + } + + private void detachVolume( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException { + EC2Volume EC2request = new EC2Volume(); + + String[] volumeId = request.getParameterValues( "VolumeId" ); + if ( null != volumeId && 0 < volumeId.length ) + EC2request.setId(volumeId[0]); + else { response.sendError(530, "Missing VolumeId parameter" ); return; } + + String[] instanceId = request.getParameterValues( "InstanceId" ); + if ( null != instanceId && 0 < instanceId.length ) + EC2request.setInstanceId(instanceId[0]); + + String[] device = request.getParameterValues( "Device" ); + if ( null != device && 0 < device.length ) + EC2request.setDevice( device[0] ); + + // -> execute 
the request + DetachVolumeResponse EC2response = EC2SoapServiceImpl.toDetachVolumeResponse( ServiceProvider.getInstance().getEC2Engine().detachVolume( EC2request )); + serializeResponse(response, EC2response); + } + + private void deleteVolume( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException { + EC2Volume EC2request = new EC2Volume(); + + String[] volumeId = request.getParameterValues( "VolumeId" ); + if ( null != volumeId && 0 < volumeId.length ) + EC2request.setId(volumeId[0]); + else { response.sendError(530, "Missing VolumeId parameter" ); return; } + + // -> execute the request + DeleteVolumeResponse EC2response = EC2SoapServiceImpl.toDeleteVolumeResponse( ServiceProvider.getInstance().getEC2Engine().deleteVolume( EC2request )); + serializeResponse(response, EC2response); + } + + private void createVolume( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException { + EC2CreateVolume EC2request = new EC2CreateVolume(); + + String[] zoneName = request.getParameterValues( "AvailabilityZone" ); + if ( null != zoneName && 0 < zoneName.length ) + EC2request.setZoneName( zoneName[0] ); + else { response.sendError(530, "Missing AvailabilityZone parameter" ); return; } + + String[] size = request.getParameterValues( "Size" ); + String[] snapshotId = request.getParameterValues("SnapshotId"); + boolean useSnapshot = false; + boolean useSize = false; + + if (null != size && 0 < size.length) + useSize = true; + + if (snapshotId != null && snapshotId.length != 0) + useSnapshot = true; + + if (useSize && !useSnapshot) { + EC2request.setSize( size[0] ); + } else if (useSnapshot && !useSize) { + EC2request.setSnapshotId(snapshotId[0]); + } else if (useSize && useSnapshot) { + response.sendError(530, "Size and SnapshotId parameters are mutually exclusive" ); return; + } else { + response.sendError(530, "Size or SnapshotId has to be specified" ); return; + } 
+ + + // -> execute the request + CreateVolumeResponse EC2response = EC2SoapServiceImpl.toCreateVolumeResponse( ServiceProvider.getInstance().getEC2Engine().createVolume( EC2request )); + serializeResponse(response, EC2response); + } + + private void createSecurityGroup( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException { + + String groupName, groupDescription = null; + + String[] name = request.getParameterValues( "GroupName" ); + if ( null != name && 0 < name.length ) + groupName = name[0]; + else { response.sendError(530, "Missing GroupName parameter" ); return; } + + String[] desc = request.getParameterValues( "GroupDescription" ); + if ( null != desc && 0 < desc.length ) + groupDescription = desc[0]; + else { response.sendError(530, "Missing GroupDescription parameter" ); return; } + + // -> execute the request + CreateSecurityGroupResponse EC2response = EC2SoapServiceImpl.toCreateSecurityGroupResponse( ServiceProvider.getInstance().getEC2Engine().createSecurityGroup( groupName, groupDescription )); + serializeResponse(response, EC2response); + } + + private void deleteSecurityGroup( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException { + String groupName = null; + + String[] name = request.getParameterValues( "GroupName" ); + if ( null != name && 0 < name.length ) + groupName = name[0]; + else { response.sendError(530, "Missing GroupName parameter" ); return; } + + // -> execute the request + DeleteSecurityGroupResponse EC2response = EC2SoapServiceImpl.toDeleteSecurityGroupResponse( ServiceProvider.getInstance().getEC2Engine().deleteSecurityGroup( groupName )); + serializeResponse(response, EC2response); + } + + private void deleteSnapshot( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException { + String snapshotId = null; + + String[] snapSet = request.getParameterValues( 
"SnapshotId" ); + if ( null != snapSet && 0 < snapSet.length ) + snapshotId = snapSet[0]; + else { response.sendError(530, "Missing SnapshotId parameter" ); return; } + + // -> execute the request + DeleteSnapshotResponse EC2response = EC2SoapServiceImpl.toDeleteSnapshotResponse( ServiceProvider.getInstance().getEC2Engine().deleteSnapshot( snapshotId )); + serializeResponse(response, EC2response); + } + + private void createSnapshot( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException { + String volumeId = null; + + String[] volSet = request.getParameterValues( "VolumeId" ); + if ( null != volSet && 0 < volSet.length ) + volumeId = volSet[0]; + else { response.sendError(530, "Missing VolumeId parameter" ); return; } + + // -> execute the request + EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); + CreateSnapshotResponse EC2response = EC2SoapServiceImpl.toCreateSnapshotResponse( engine.createSnapshot( volumeId ), engine); + serializeResponse(response, EC2response); + } + + private void deregisterImage( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException { + EC2Image image = new EC2Image(); + + String[] imageId = request.getParameterValues( "ImageId" ); + if ( null != imageId && 0 < imageId.length ) + image.setId( imageId[0] ); + else { response.sendError(530, "Missing ImageId parameter" ); return; } + + // -> execute the request + DeregisterImageResponse EC2response = EC2SoapServiceImpl.toDeregisterImageResponse( ServiceProvider.getInstance().getEC2Engine().deregisterImage( image )); + serializeResponse(response, EC2response); + } + + private void createImage( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException { + EC2CreateImage EC2request = new EC2CreateImage(); + + String[] instanceId = request.getParameterValues( "InstanceId" ); + if ( null != instanceId && 0 < 
instanceId.length ) + EC2request.setInstanceId( instanceId[0] ); + else { response.sendError(530, "Missing InstanceId parameter" ); return; } + + String[] name = request.getParameterValues( "Name" ); + if ( null != name && 0 < name.length ) + EC2request.setName( name[0] ); + else { response.sendError(530, "Missing Name parameter" ); return; } + + String[] description = request.getParameterValues( "Description" ); + if ( null != description && 0 < description.length ) + EC2request.setDescription( description[0] ); + + // -> execute the request + CreateImageResponse EC2response = EC2SoapServiceImpl.toCreateImageResponse( ServiceProvider.getInstance().getEC2Engine().createImage( EC2request )); + serializeResponse(response, EC2response); + } + + private void registerImage( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException { + EC2RegisterImage EC2request = new EC2RegisterImage(); + + String[] location = request.getParameterValues( "ImageLocation" ); + if ( null != location && 0 < location.length ) + EC2request.setLocation( location[0] ); + else { response.sendError(530, "Missing ImageLocation parameter" ); return; } + + String[] cloudRedfined = request.getParameterValues( "Architecture" ); + if ( null != cloudRedfined && 0 < cloudRedfined.length ) + EC2request.setArchitecture( cloudRedfined[0] ); + else { response.sendError(530, "Missing Architecture parameter" ); return; } + + String[] name = request.getParameterValues( "Name" ); + if ( null != name && 0 < name.length ) + EC2request.setName( name[0] ); + + String[] description = request.getParameterValues( "Description" ); + if ( null != description && 0 < description.length ) + EC2request.setDescription( description[0] ); + + // -> execute the request + RegisterImageResponse EC2response = EC2SoapServiceImpl.toRegisterImageResponse( ServiceProvider.getInstance().getEC2Engine().registerImage( EC2request )); + serializeResponse(response, EC2response); + } + 
+ private void modifyImageAttribute( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException { + EC2Image image = new EC2Image(); + + // -> its interesting to note that the SOAP API docs has description but the REST API docs do not + String[] imageId = request.getParameterValues( "ImageId" ); + if ( null != imageId && 0 < imageId.length ) + image.setId( imageId[0] ); + else { response.sendError(530, "Missing ImageId parameter" ); return; } + + String[] description = request.getParameterValues( "Description" ); + if ( null != description && 0 < description.length ) + image.setDescription( description[0] ); + else { response.sendError(530, "Missing Description parameter" ); return; } + + // -> execute the request + ModifyImageAttributeResponse EC2response = EC2SoapServiceImpl.toModifyImageAttributeResponse( ServiceProvider.getInstance().getEC2Engine().modifyImageAttribute( image )); + serializeResponse(response, EC2response); + } + + private void resetImageAttribute( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException { + EC2Image image = new EC2Image(); + + String[] imageId = request.getParameterValues( "ImageId" ); + if ( null != imageId && 0 < imageId.length ) + image.setId( imageId[0] ); + else { response.sendError(530, "Missing ImageId parameter" ); return; } + + // -> execute the request + image.setDescription( "" ); + ResetImageAttributeResponse EC2response = EC2SoapServiceImpl.toResetImageAttributeResponse( ServiceProvider.getInstance().getEC2Engine().modifyImageAttribute( image )); + serializeResponse(response, EC2response); + } + + private void runInstances( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException { + EC2RunInstances EC2request = new EC2RunInstances(); + + // -> so in the Amazon docs for this REST call there is no userData even though there is in the SOAP docs + 
String[] imageId = request.getParameterValues( "ImageId" ); + if ( null != imageId && 0 < imageId.length ) + EC2request.setTemplateId( imageId[0] ); + else { response.sendError(530, "Missing ImageId parameter" ); return; } + + String[] minCount = request.getParameterValues( "MinCount" ); + if ( null != minCount && 0 < minCount.length ) + EC2request.setMinCount( Integer.parseInt( minCount[0] )); + else { response.sendError(530, "Missing MinCount parameter" ); return; } + + String[] maxCount = request.getParameterValues( "MaxCount" ); + if ( null != maxCount && 0 < maxCount.length ) + EC2request.setMaxCount( Integer.parseInt( maxCount[0] )); + else { response.sendError(530, "Missing MaxCount parameter" ); return; } + + String[] instanceType = request.getParameterValues( "InstanceType" ); + if ( null != instanceType && 0 < instanceType.length ) + EC2request.setInstanceType( instanceType[0] ); + + String[] zoneName = request.getParameterValues( "Placement.AvailabilityZone" ); + if ( null != zoneName && 0 < zoneName.length ) + EC2request.setZoneName( zoneName[0] ); + + String[] size = request.getParameterValues("size"); + if (size != null) { + EC2request.setSize(Integer.valueOf(size[0])); + } + + String[] keyName = request.getParameterValues("KeyName"); + if (keyName != null) { + EC2request.setKeyName(keyName[0]); + } + + // -> execute the request + EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); + RunInstancesResponse EC2response = EC2SoapServiceImpl.toRunInstancesResponse( engine.runInstances( EC2request ), engine); + serializeResponse(response, EC2response); + } + + private void rebootInstances( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException { + EC2RebootInstances EC2request = new EC2RebootInstances(); + int count = 0; + + // -> load in all the "InstanceId.n" parameters if any + Enumeration names = request.getParameterNames(); + while( names.hasMoreElements()) { + String key = 
(String)names.nextElement(); + if (key.startsWith("InstanceId")) { + String[] value = request.getParameterValues( key ); + if (null != value && 0 < value.length) { + EC2request.addInstanceId( value[0] ); + count++; + } + } + } + if (0 == count) { response.sendError(530, "Missing InstanceId parameter" ); return; } + + // -> execute the request + RebootInstancesResponse EC2response = EC2SoapServiceImpl.toRebootInstancesResponse( ServiceProvider.getInstance().getEC2Engine().rebootInstances(EC2request)); + serializeResponse(response, EC2response); + } + + private void startInstances( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException { + EC2StartInstances EC2request = new EC2StartInstances(); + int count = 0; + + // -> load in all the "InstanceId.n" parameters if any + Enumeration names = request.getParameterNames(); + while( names.hasMoreElements()) { + String key = (String)names.nextElement(); + if (key.startsWith("InstanceId")) { + String[] value = request.getParameterValues( key ); + if (null != value && 0 < value.length) { + EC2request.addInstanceId( value[0] ); + count++; + } + } + } + if (0 == count) { response.sendError(530, "Missing InstanceId parameter" ); return; } + + // -> execute the request + StartInstancesResponse EC2response = EC2SoapServiceImpl.toStartInstancesResponse( ServiceProvider.getInstance().getEC2Engine().startInstances(EC2request)); + serializeResponse(response, EC2response); + } + + private void stopInstances( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException { + EC2StopInstances EC2request = new EC2StopInstances(); + int count = 0; + + // -> load in all the "InstanceId.n" parameters if any + Enumeration names = request.getParameterNames(); + while( names.hasMoreElements()) { + String key = (String)names.nextElement(); + if (key.startsWith("InstanceId")) { + String[] value = request.getParameterValues( key ); + if 
(null != value && 0 < value.length) { + EC2request.addInstanceId( value[0] ); + count++; + } + } + } + if (0 == count) { response.sendError(530, "Missing InstanceId parameter" ); return; } + + // -> execute the request + StopInstancesResponse EC2response = EC2SoapServiceImpl.toStopInstancesResponse( ServiceProvider.getInstance().getEC2Engine().stopInstances( EC2request )); + serializeResponse(response, EC2response); + } + + private void terminateInstances( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException { + EC2StopInstances EC2request = new EC2StopInstances(); + int count = 0; + + // -> load in all the "InstanceId.n" parameters if any + Enumeration names = request.getParameterNames(); + while( names.hasMoreElements()) { + String key = (String)names.nextElement(); + if (key.startsWith("InstanceId")) { + String[] value = request.getParameterValues( key ); + if (null != value && 0 < value.length) { + EC2request.addInstanceId( value[0] ); + count++; + } + } + } + if (0 == count) { response.sendError(530, "Missing InstanceId parameter" ); return; } + + // -> execute the request + EC2request.setDestroyInstances( true ); + TerminateInstancesResponse EC2response = EC2SoapServiceImpl.toTermInstancesResponse( ServiceProvider.getInstance().getEC2Engine().stopInstances( EC2request )); + serializeResponse(response, EC2response); + } + + /** + * We are reusing the SOAP code to process this request. We then use Axiom to serialize the + * resulting EC2 Amazon object into XML to return to the client. 
+ */ + private void describeAvailabilityZones( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException { + EC2DescribeAvailabilityZones EC2request = new EC2DescribeAvailabilityZones(); + + // -> load in all the "ZoneName.n" parameters if any + Enumeration names = request.getParameterNames(); + while( names.hasMoreElements()) { + String key = (String)names.nextElement(); + if (key.startsWith("ZoneName")) { + String[] value = request.getParameterValues( key ); + if (null != value && 0 < value.length) EC2request.addZone( value[0] ); + } + } + // -> execute the request + DescribeAvailabilityZonesResponse EC2response = EC2SoapServiceImpl.toDescribeAvailabilityZonesResponse( ServiceProvider.getInstance().getEC2Engine().handleRequest( EC2request )); + serializeResponse(response, EC2response); + } + + private void describeImages( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException { + EC2DescribeImages EC2request = new EC2DescribeImages(); + + // -> load in all the "ImageId.n" parameters if any, and ignore all other parameters + Enumeration names = request.getParameterNames(); + while( names.hasMoreElements()) { + String key = (String)names.nextElement(); + if (key.startsWith("ImageId")) { + String[] value = request.getParameterValues( key ); + if (null != value && 0 < value.length) EC2request.addImageSet( value[0] ); + } + } + // -> execute the request + EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); + DescribeImagesResponse EC2response = EC2SoapServiceImpl.toDescribeImagesResponse( engine.describeImages( EC2request )); + serializeResponse(response, EC2response); + } + + private void describeImageAttribute( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException { + EC2DescribeImages EC2request = new EC2DescribeImages(); + + // -> only works for queries about descriptions + 
String[] descriptions = request.getParameterValues( "Description" ); + if ( null != descriptions && 0 < descriptions.length ) { + String[] value = request.getParameterValues( "ImageId" ); + EC2request.addImageSet( value[0] ); + } + else { + response.sendError(501, "Unsupported - only description supported" ); + return; + } + + // -> execute the request + DescribeImageAttributeResponse EC2response = EC2SoapServiceImpl.toDescribeImageAttributeResponse( ServiceProvider.getInstance().getEC2Engine().describeImages( EC2request )); + serializeResponse(response, EC2response); + } + + + private void describeInstances( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException + { + EC2DescribeInstances EC2request = new EC2DescribeInstances(); + + // -> load in all the "InstanceId.n" parameters if any + Enumeration names = request.getParameterNames(); + while( names.hasMoreElements()) + { + String key = (String)names.nextElement(); + if (key.startsWith("InstanceId")) { + String[] value = request.getParameterValues( key ); + if (null != value && 0 < value.length) EC2request.addInstanceId( value[0] ); + } + } + + // -> are there any filters with this request? 
+ EC2Filter[] filterSet = extractFilters( request ); + if (null != filterSet) + { + EC2InstanceFilterSet ifs = new EC2InstanceFilterSet(); + for( int i=0; i < filterSet.length; i++ ) ifs.addFilter( filterSet[i] ); + EC2request.setFilterSet( ifs ); + } + + // -> execute the request + EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); + DescribeInstancesResponse EC2response = EC2SoapServiceImpl.toDescribeInstancesResponse( engine.describeInstances( EC2request ), engine); + serializeResponse(response, EC2response); + } + + private void describeAddresses( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException { + EC2DescribeAddresses ec2Request = new EC2DescribeAddresses(); + + // -> load in all the "PublicIp.n" parameters if any + Enumeration names = request.getParameterNames(); + while( names.hasMoreElements()) { + String key = (String)names.nextElement(); + if (key.startsWith("PublicIp")) { + String[] value = request.getParameterValues( key ); + if (null != value && 0 < value.length) ec2Request.addPublicIp( value[0] ); + } + } + // -> execute the request + EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); + serializeResponse(response, EC2SoapServiceImpl.toDescribeAddressesResponse( engine.describeAddresses( ec2Request))); + } + + private void allocateAddress( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException { + + EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); + + AllocateAddressResponse ec2Response = EC2SoapServiceImpl.toAllocateAddressResponse( engine.allocateAddress()); + + serializeResponse(response, ec2Response); + } + + private void releaseAddress( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException { + + EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); + + String publicIp = request.getParameter( "PublicIp" ); + if 
(publicIp == null) { + response.sendError(530, "Missing PublicIp parameter"); + return; + } + + EC2ReleaseAddress ec2Request = new EC2ReleaseAddress(); + if (ec2Request != null) { + ec2Request.setPublicIp(publicIp); + } + + ReleaseAddressResponse EC2Response = EC2SoapServiceImpl.toReleaseAddressResponse( engine.releaseAddress( ec2Request )); + + serializeResponse(response, EC2Response); + } + + private void associateAddress( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException { + EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); + + String publicIp = request.getParameter( "PublicIp" ); + if (null == publicIp) { + response.sendError(530, "Missing PublicIp parameter" ); + return; + } + String instanceId = request.getParameter( "InstanceId" ); + if (null == instanceId) { + response.sendError(530, "Missing InstanceId parameter" ); + return; + } + + EC2AssociateAddress ec2Request = new EC2AssociateAddress(); + if (ec2Request != null) { + ec2Request.setInstanceId(instanceId); + ec2Request.setPublicIp(publicIp); + } + + AssociateAddressResponse ec2Response = EC2SoapServiceImpl.toAssociateAddressResponse( engine.associateAddress( ec2Request )); + + serializeResponse(response, ec2Response); + } + + private void disassociateAddress( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException { + EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); + + String publicIp = request.getParameter( "PublicIp" ); + if (null == publicIp) { + response.sendError(530, "Missing PublicIp parameter" ); + return; + } + + EC2DisassociateAddress ec2Request = new EC2DisassociateAddress(); + if (ec2Request != null) { + ec2Request.setPublicIp(publicIp); + } + + DisassociateAddressResponse ec2Response = EC2SoapServiceImpl.toDisassociateAddressResponse( engine.disassociateAddress( ec2Request ) ); + + serializeResponse(response, ec2Response); + } + + + 
private void describeSecurityGroups( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException + { + EC2DescribeSecurityGroups EC2request = new EC2DescribeSecurityGroups(); + + // -> load in all the "GroupName.n" parameters if any + Enumeration names = request.getParameterNames(); + while( names.hasMoreElements()) { + String key = (String)names.nextElement(); + if (key.startsWith("GroupName")) { + String[] value = request.getParameterValues( key ); + if (null != value && 0 < value.length) EC2request.addGroupName( value[0] ); + } + } + + // -> are there any filters with this request? + EC2Filter[] filterSet = extractFilters( request ); + if (null != filterSet) { + EC2GroupFilterSet gfs = new EC2GroupFilterSet(); + for (EC2Filter filter : filterSet) gfs.addFilter( filter ); + EC2request.setFilterSet( gfs ); + } + + // -> execute the request + EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); + + DescribeSecurityGroupsResponse EC2response = EC2SoapServiceImpl.toDescribeSecurityGroupsResponse( engine.describeSecurityGroups( EC2request )); + serializeResponse(response, EC2response); + } + + + private void describeInstanceAttribute( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException { + EC2DescribeInstances EC2request = new EC2DescribeInstances(); + String instanceType = null; + + // -> we are only handling queries about the "Attribute=instanceType" + Enumeration names = request.getParameterNames(); + while( names.hasMoreElements()) { + String key = (String)names.nextElement(); + if (key.startsWith("Attribute")) { + String[] value = request.getParameterValues( key ); + if (null != value && 0 < value.length && value[0].equalsIgnoreCase( "instanceType" )) { + instanceType = value[0]; + break; + } + } + } + if ( null != instanceType ) { + String[] value = request.getParameterValues( "InstanceId" ); + EC2request.addInstanceId( value[0] ); + } 
+ else { + response.sendError(501, "Unsupported - only instanceType supported" ); + return; + } + + // -> execute the request + DescribeInstanceAttributeResponse EC2response = EC2SoapServiceImpl.toDescribeInstanceAttributeResponse( ServiceProvider.getInstance().getEC2Engine().describeInstances(EC2request)); + serializeResponse(response, EC2response); + } + + + private void describeSnapshots( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException + { + EC2DescribeSnapshots EC2request = new EC2DescribeSnapshots(); + + // -> load in all the "SnapshotId.n" parameters if any, and ignore any other parameters + Enumeration names = request.getParameterNames(); + while( names.hasMoreElements()) + { + String key = (String)names.nextElement(); + if (key.startsWith("SnapshotId")) { + String[] value = request.getParameterValues( key ); + if (null != value && 0 < value.length) EC2request.addSnapshotId( value[0] ); + } + } + + // -> are there any filters with this request? 
+ EC2Filter[] filterSet = extractFilters( request ); + if (null != filterSet) + { + EC2SnapshotFilterSet sfs = new EC2SnapshotFilterSet(); + for( int i=0; i < filterSet.length; i++ ) sfs.addFilter( filterSet[i] ); + EC2request.setFilterSet( sfs ); + } + + // -> execute the request + EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); + DescribeSnapshotsResponse EC2response = EC2SoapServiceImpl.toDescribeSnapshotsResponse( engine.handleRequest( EC2request )); + serializeResponse(response, EC2response); + } + + + private void describeVolumes( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException + { + EC2DescribeVolumes EC2request = new EC2DescribeVolumes(); + + // -> load in all the "VolumeId.n" parameters if any + Enumeration names = request.getParameterNames(); + while( names.hasMoreElements()) + { + String key = (String)names.nextElement(); + if (key.startsWith("VolumeId")) + { + String[] value = request.getParameterValues( key ); + if (null != value && 0 < value.length) EC2request.addVolumeId( value[0] ); + } + } + + // -> are there any filters with this request? 
+ EC2Filter[] filterSet = extractFilters( request ); + if (null != filterSet) + { + EC2VolumeFilterSet vfs = new EC2VolumeFilterSet(); + for( int i=0; i < filterSet.length; i++ ) vfs.addFilter( filterSet[i] ); + EC2request.setFilterSet( vfs ); + } + + // -> execute the request + DescribeVolumesResponse EC2response = EC2SoapServiceImpl.toDescribeVolumesResponse( ServiceProvider.getInstance().getEC2Engine().handleRequest( EC2request )); + serializeResponse(response, EC2response); + } + + + /** + * Example of how the filters are defined in a REST request: + * https:///?Action=DescribeVolumes + * &Filter.1.Name=attachment.instance-id + * &Filter.1.Value.1=i-1a2b3c4d + * &Filter.2.Name=attachment.delete-on-termination + * &Filter.2.Value.1=true + * + * @param request + * @return List + */ + private EC2Filter[] extractFilters( HttpServletRequest request ) + { + String filterName = null; + String value = null; + EC2Filter nextFilter = null; + boolean timeFilter = false; + int filterCount = 1; + int valueCount = 1; + + List filterSet = new ArrayList(); + + do + { filterName = request.getParameter( "Filter." + filterCount + ".Name" ); + if (null != filterName) + { + nextFilter = new EC2Filter(); + nextFilter.setName( filterName ); + timeFilter = (filterName.equalsIgnoreCase( "attachment.attach-time" ) || filterName.equalsIgnoreCase( "create-time" )); + valueCount = 1; + do + { + value = request.getParameter( "Filter." + filterCount + ".Value." 
+ valueCount ); + if (null != value) + { + // -> time values are not encoded as regexes + if ( timeFilter ) + nextFilter.addValue( value ); + else nextFilter.addValueEncoded( value ); + + valueCount++; + } + } + while( null != value ); + + filterSet.add( nextFilter ); + filterCount++; + } + } + while( null != filterName ); + + if ( 1 == filterCount ) + return null; + else return filterSet.toArray(new EC2Filter[0]); + } + + + private void describeKeyPairs(HttpServletRequest request, HttpServletResponse response) + throws ADBException, XMLStreamException, IOException { + EC2DescribeKeyPairs ec2Request = new EC2DescribeKeyPairs(); + + + String[] keyNames = request.getParameterValues( "KeyName" ); + if (keyNames != null) { + for (String keyName : keyNames) { + ec2Request.addKeyName(keyName); + } + } + EC2Filter[] filterSet = extractFilters( request ); + if (null != filterSet){ + EC2KeyPairFilterSet vfs = new EC2KeyPairFilterSet(); + for (EC2Filter filter : filterSet) { + vfs.addFilter(filter); + } + ec2Request.setKeyFilterSet(vfs); + } + + DescribeKeyPairsResponse EC2Response = EC2SoapServiceImpl.toDescribeKeyPairs( + ServiceProvider.getInstance().getEC2Engine().describeKeyPairs( ec2Request )); + serializeResponse(response, EC2Response); + } + + private void importKeyPair(HttpServletRequest request, HttpServletResponse response) + throws ADBException, XMLStreamException, IOException { + + String keyName = request.getParameter("KeyName"); + String publicKeyMaterial = request.getParameter("PublicKeyMaterial"); + if (keyName==null && publicKeyMaterial==null) { + response.sendError(530, "Missing parameter"); + return; + } + + if (!publicKeyMaterial.contains(" ")) + publicKeyMaterial = new String(Base64.decodeBase64(publicKeyMaterial.getBytes())); + + + + EC2ImportKeyPair ec2Request = new EC2ImportKeyPair(); + if (ec2Request != null) { + ec2Request.setKeyName(request.getParameter("KeyName")); + ec2Request.setPublicKeyMaterial(request.getParameter("PublicKeyMaterial")); + } 
+ + ImportKeyPairResponse EC2Response = EC2SoapServiceImpl.toImportKeyPair( + ServiceProvider.getInstance().getEC2Engine().importKeyPair( ec2Request )); + serializeResponse(response, EC2Response); + } + + private void createKeyPair(HttpServletRequest request, HttpServletResponse response) + throws ADBException, XMLStreamException, IOException { + String keyName = request.getParameter("KeyName"); + if (keyName==null) { + response.sendError(530, "Missing KeyName parameter"); + return; + } + + EC2CreateKeyPair ec2Request = new EC2CreateKeyPair(); + if (ec2Request != null) { + ec2Request.setKeyName(keyName); + } + + CreateKeyPairResponse EC2Response = EC2SoapServiceImpl.toCreateKeyPair( + ServiceProvider.getInstance().getEC2Engine().createKeyPair(ec2Request)); + serializeResponse(response, EC2Response); + } + + private void deleteKeyPair(HttpServletRequest request, HttpServletResponse response) + throws ADBException, XMLStreamException, IOException { + String keyName = request.getParameter("KeyName"); + if (keyName==null) { + response.sendError(530, "Missing KeyName parameter"); + return; + } + + EC2DeleteKeyPair ec2Request = new EC2DeleteKeyPair(); + ec2Request.setKeyName(keyName); + + DeleteKeyPairResponse EC2Response = EC2SoapServiceImpl.toDeleteKeyPair( + ServiceProvider.getInstance().getEC2Engine().deleteKeyPair(ec2Request)); + serializeResponse(response, EC2Response); + } + + private void getPasswordData(HttpServletRequest request, HttpServletResponse response) + throws ADBException, XMLStreamException, IOException { + String instanceId = request.getParameter("InstanceId"); + if (instanceId==null) { + response.sendError(530, "Missing InstanceId parameter"); + return; + } + + GetPasswordDataResponse EC2Response = EC2SoapServiceImpl.toGetPasswordData( + ServiceProvider.getInstance().getEC2Engine().getPasswordData(instanceId)); + serializeResponse(response, EC2Response); + } + + /** + * This function implements the EC2 REST authentication algorithm. 
It uses the given + * "AWSAccessKeyId" parameter to look up the Cloud.com account holder's secret key which is + * used as input to the signature calculation. In addition, it tests the given "Expires" + * parameter to see if the signature has expired and if so the request fails. + */ + private boolean authenticateRequest( HttpServletRequest request, HttpServletResponse response ) + throws SignatureException, IOException, InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException, ParseException + { + String cloudSecretKey = null; + String cloudAccessKey = null; + String signature = null; + String sigMethod = null; + + // [A] Basic parameters required for an authenticated rest request + // -> note that the Servlet engine will un-URL encode all parameters we extract via "getParameterValues()" calls + String[] awsAccess = request.getParameterValues( "AWSAccessKeyId" ); + if ( null != awsAccess && 0 < awsAccess.length ) + cloudAccessKey = awsAccess[0]; + else { response.sendError(530, "Missing AWSAccessKeyId parameter" ); return false; } + + String[] clientSig = request.getParameterValues( "Signature" ); + if ( null != clientSig && 0 < clientSig.length ) + signature = clientSig[0]; + else { response.sendError(530, "Missing Signature parameter" ); return false; } + + String[] method = request.getParameterValues( "SignatureMethod" ); + if ( null != method && 0 < method.length ) + { + sigMethod = method[0]; + if (!sigMethod.equals( "HmacSHA256" ) && !sigMethod.equals( "HmacSHA1" )) { + response.sendError(531, "Unsupported SignatureMethod value: " + sigMethod + " expecting: HmacSHA256 or HmacSHA1" ); + return false; + } + } + else { response.sendError(530, "Missing SignatureMethod parameter" ); return false; } + + String[] version = request.getParameterValues( "Version" ); + if ( null != version && 0 < version.length ) + { + if (!version[0].equals( wsdlVersion )) { + response.sendError(531, "Unsupported Version value: " + version[0] + " expecting: 
" + wsdlVersion ); + return false; + } + } + else { response.sendError(530, "Missing Version parameter" ); return false; } + + String[] sigVersion = request.getParameterValues( "SignatureVersion" ); + if ( null != sigVersion && 0 < sigVersion.length ) + { + if (!sigVersion[0].equals( "2" )) { + response.sendError(531, "Unsupported SignatureVersion value: " + sigVersion[0] + " expecting: 2" ); + return false; + } + } + else { response.sendError(530, "Missing SignatureVersion parameter" ); return false; } + + // -> can have only one but not both { Expires | Timestamp } headers + String[] expires = request.getParameterValues( "Expires" ); + if ( null != expires && 0 < expires.length ) + { + // -> contains the date and time at which the signature included in the request EXPIRES + if (hasSignatureExpired( expires[0] )) { + response.sendError(531, "Expires parameter indicates signature has expired: " + expires[0] ); + return false; + } + } + else + { // -> contains the date and time at which the request is SIGNED + String[] time = request.getParameterValues( "Timestamp" ); + if ( null == time || 0 == time.length ) { + response.sendError(530, "Missing Timestamp and Expires parameter, one is required" ); + return false; + } + } + + // [B] Use the cloudAccessKey to get the users secret key in the db + UserCredentialsDao credentialDao = new UserCredentialsDao(); + UserCredentials cloudKeys = credentialDao.getByAccessKey( cloudAccessKey ); + if ( null == cloudKeys ) + { + logger.debug( cloudAccessKey + " is not defined in the EC2 service - call SetUserKeys" ); + response.sendError(404, cloudAccessKey + " is not defined in the EC2 service - call SetUserKeys" ); + return false; + } + else cloudSecretKey = cloudKeys.getSecretKey(); + + + // [C] Verify the signature + // -> getting the query-string in this way maintains its URL encoding + EC2RestAuth restAuth = new EC2RestAuth(); + restAuth.setHostHeader( request.getHeader( "Host" )); + String requestUri = 
request.getRequestURI(); + + //If forwarded from another basepath: + String forwardedPath = (String) request.getAttribute("javax.servlet.forward.request_uri"); + if(forwardedPath!=null){ + requestUri=forwardedPath; + } + restAuth.setHTTPRequestURI( requestUri); + restAuth.setQueryString( request.getQueryString()); + + if ( restAuth.verifySignature( request.getMethod(), cloudSecretKey, signature, sigMethod )) { + UserContext.current().initContext( cloudAccessKey, cloudSecretKey, cloudAccessKey, "REST request", null ); + return true; + } + else throw new PermissionDeniedException("Invalid signature"); + } + + /** + * We check this to reduce replay attacks. + * + * @param timeStamp + * @return true - if the request is not longer valid, false otherwise + * @throws ParseException + */ + private boolean hasSignatureExpired( String timeStamp ) { + Calendar cal = EC2RestAuth.parseDateString( timeStamp ); + if (null == cal) return false; + + Date expiredTime = cal.getTime(); + Date today = new Date(); // -> gets set to time of creation + if ( 0 >= expiredTime.compareTo( today )) { + logger.debug( "timestamp given: [" + timeStamp + "], now: [" + today.toString() + "]" ); + return true; + } + else return false; + } + + private static void endResponse(HttpServletResponse response, String content) { + try { + byte[] data = content.getBytes(); + response.setContentLength(data.length); + OutputStream os = response.getOutputStream(); + os.write(data); + os.close(); + + } catch(Throwable e) { + logger.error("Unexpected exception " + e.getMessage(), e); + } + } + + private void logRequest(HttpServletRequest request) { + if(logger.isInfoEnabled()) { + logger.info("EC2 Request method: " + request.getMethod()); + logger.info("Request contextPath: " + request.getContextPath()); + logger.info("Request pathInfo: " + request.getPathInfo()); + logger.info("Request pathTranslated: " + request.getPathTranslated()); + logger.info("Request queryString: " + request.getQueryString()); + 
logger.info("Request requestURI: " + request.getRequestURI()); + logger.info("Request requestURL: " + request.getRequestURL()); + logger.info("Request servletPath: " + request.getServletPath()); + Enumeration headers = request.getHeaderNames(); + if(headers != null) { + while(headers.hasMoreElements()) { + Object headerName = headers.nextElement(); + logger.info("Request header " + headerName + ":" + request.getHeader((String)headerName)); + } + } + + Enumeration params = request.getParameterNames(); + if(params != null) { + while(params.hasMoreElements()) { + Object paramName = params.nextElement(); + logger.info("Request parameter " + paramName + ":" + + request.getParameter((String)paramName)); + } + } + } + } + + /** + * Send out an error response according to Amazon convention. + */ + private void faultResponse(HttpServletResponse response, String errorCode, String errorMessage) { + try { + OutputStreamWriter out = new OutputStreamWriter(response.getOutputStream()); + response.setContentType("text/xml; charset=UTF-8"); + out.write(""); + out.write(""); + out.write(errorCode); + out.write(""); + out.write(errorMessage); + out.write(""); + out.write(UUID.randomUUID().toString()); + out.write(""); + out.flush(); + out.close(); + } catch (IOException e) { + logger.error("Unexpected exception " + e.getMessage(), e); + } + } + + /** + * Serialize Axis beans to XML output. 
+ */ + private void serializeResponse(HttpServletResponse response, ADBBean EC2Response) + throws ADBException, XMLStreamException, IOException { + OutputStream os = response.getOutputStream(); + response.setStatus(200); + response.setContentType("text/xml; charset=UTF-8"); + XMLStreamWriter xmlWriter = xmlOutFactory.createXMLStreamWriter( os ); + MTOMAwareXMLSerializer MTOMWriter = new MTOMAwareXMLSerializer( xmlWriter ); + MTOMWriter.setDefaultNamespace("http://ec2.amazonaws.com/doc/" + wsdlVersion + "/"); + EC2Response.serialize( null, factory, MTOMWriter ); + xmlWriter.flush(); + xmlWriter.close(); + os.close(); + } +} \ No newline at end of file diff --git a/awsapi/src/com/cloud/bridge/service/EC2SoapService.java b/awsapi/src/com/cloud/bridge/service/EC2SoapService.java index 6c866030240..20e3df2dcae 100644 --- a/awsapi/src/com/cloud/bridge/service/EC2SoapService.java +++ b/awsapi/src/com/cloud/bridge/service/EC2SoapService.java @@ -1,573 +1,569 @@ -/* - * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.cloud.bridge.service; - -import org.apache.log4j.Logger; - -import com.amazon.ec2.*; -<<<<<<< HEAD -======= -import com.cloud.bridge.service.controller.s3.ServiceProvider; ->>>>>>> 6472e7b... Now really adding the renamed files! 
- -public class EC2SoapService implements AmazonEC2SkeletonInterface { - protected final static Logger logger = Logger.getLogger(EC2SoapService.class); - - public AllocateAddressResponse allocateAddress( - AllocateAddress allocateAddress) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.allocateAddress(allocateAddress); - } - - public AssociateAddressResponse associateAddress( - AssociateAddress associateAddress) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.associateAddress(associateAddress); - } - - public AssociateDhcpOptionsResponse associateDhcpOptions( - AssociateDhcpOptions associateDhcpOptions) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.associateDhcpOptions(associateDhcpOptions); - } - - public AttachVolumeResponse attachVolume(AttachVolume attachVolume) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.attachVolume(attachVolume); - } - - public AttachVpnGatewayResponse attachVpnGateway( - AttachVpnGateway attachVpnGateway) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.attachVpnGateway(attachVpnGateway); - } - - public AuthorizeSecurityGroupIngressResponse authorizeSecurityGroupIngress( - AuthorizeSecurityGroupIngress authorizeSecurityGroupIngress) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.authorizeSecurityGroupIngress(authorizeSecurityGroupIngress); - } - - public BundleInstanceResponse bundleInstance( - BundleInstance bundleInstance) { - AmazonEC2SkeletonInterface ec2Service = 
ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.bundleInstance(bundleInstance); - } - - public CancelBundleTaskResponse cancelBundleTask( - CancelBundleTask cancelBundleTask) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.cancelBundleTask(cancelBundleTask); - } - - public CancelSpotInstanceRequestsResponse cancelSpotInstanceRequests( - CancelSpotInstanceRequests cancelSpotInstanceRequests) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.cancelSpotInstanceRequests(cancelSpotInstanceRequests); - } - - public ConfirmProductInstanceResponse confirmProductInstance( - ConfirmProductInstance confirmProductInstance) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.confirmProductInstance(confirmProductInstance); - } - - public CreateCustomerGatewayResponse createCustomerGateway( - CreateCustomerGateway createCustomerGateway) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.createCustomerGateway(createCustomerGateway); - } - - public CreateDhcpOptionsResponse createDhcpOptions( - CreateDhcpOptions createDhcpOptions) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.createDhcpOptions(createDhcpOptions); - } - - public CreateImageResponse createImage(CreateImage createImage) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.createImage(createImage); - } - - public CreateKeyPairResponse createKeyPair(CreateKeyPair createKeyPair) { - AmazonEC2SkeletonInterface 
ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.createKeyPair(createKeyPair); - } - - public CreateSecurityGroupResponse createSecurityGroup( - CreateSecurityGroup createSecurityGroup) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.createSecurityGroup(createSecurityGroup); - } - - public CreateSnapshotResponse createSnapshot( - CreateSnapshot createSnapshot) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.createSnapshot(createSnapshot); - } - - public CreateSpotDatafeedSubscriptionResponse createSpotDatafeedSubscription( - CreateSpotDatafeedSubscription createSpotDatafeedSubscription) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.createSpotDatafeedSubscription(createSpotDatafeedSubscription); - } - - public CreateSubnetResponse createSubnet(CreateSubnet createSubnet) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.createSubnet(createSubnet); - } - - public CreateVolumeResponse createVolume(CreateVolume createVolume) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.createVolume(createVolume); - } - - public CreateVpcResponse createVpc(CreateVpc createVpc) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.createVpc(createVpc); - } - - public CreateVpnConnectionResponse createVpnConnection( - CreateVpnConnection createVpnConnection) { - AmazonEC2SkeletonInterface ec2Service = 
ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.createVpnConnection(createVpnConnection); - } - - public CreateVpnGatewayResponse createVpnGateway( - CreateVpnGateway createVpnGateway) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.createVpnGateway(createVpnGateway); - } - - public DeleteCustomerGatewayResponse deleteCustomerGateway( - DeleteCustomerGateway deleteCustomerGateway) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.deleteCustomerGateway(deleteCustomerGateway); - } - - public DeleteDhcpOptionsResponse deleteDhcpOptions( - DeleteDhcpOptions deleteDhcpOptions) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.deleteDhcpOptions(deleteDhcpOptions); - } - - public DeleteKeyPairResponse deleteKeyPair(DeleteKeyPair deleteKeyPair) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.deleteKeyPair(deleteKeyPair); - } - - public DeleteSecurityGroupResponse deleteSecurityGroup( - DeleteSecurityGroup deleteSecurityGroup) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.deleteSecurityGroup(deleteSecurityGroup); - } - - public DeleteSnapshotResponse deleteSnapshot( - DeleteSnapshot deleteSnapshot) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.deleteSnapshot(deleteSnapshot); - } - - public DeleteSpotDatafeedSubscriptionResponse deleteSpotDatafeedSubscription( - DeleteSpotDatafeedSubscription deleteSpotDatafeedSubscription) { - AmazonEC2SkeletonInterface 
ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.deleteSpotDatafeedSubscription(deleteSpotDatafeedSubscription); - } - - public DeleteSubnetResponse deleteSubnet(DeleteSubnet deleteSubnet) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.deleteSubnet(deleteSubnet); - } - - public DeleteVolumeResponse deleteVolume(DeleteVolume deleteVolume) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.deleteVolume(deleteVolume); - } - - public DeleteVpcResponse deleteVpc(DeleteVpc deleteVpc) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.deleteVpc(deleteVpc); - } - - public DeleteVpnConnectionResponse deleteVpnConnection( - DeleteVpnConnection deleteVpnConnection) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.deleteVpnConnection(deleteVpnConnection); - } - - public DeleteVpnGatewayResponse deleteVpnGateway( - DeleteVpnGateway deleteVpnGateway) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.deleteVpnGateway(deleteVpnGateway); - } - - public DeregisterImageResponse deregisterImage( - DeregisterImage deregisterImage) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.deregisterImage(deregisterImage); - } - - public DescribeAddressesResponse describeAddresses( - DescribeAddresses describeAddresses) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return 
ec2Service.describeAddresses(describeAddresses); - } - - public DescribeAvailabilityZonesResponse describeAvailabilityZones( - DescribeAvailabilityZones describeAvailabilityZones) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.describeAvailabilityZones(describeAvailabilityZones); - } - - public DescribeBundleTasksResponse describeBundleTasks( - DescribeBundleTasks describeBundleTasks) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.describeBundleTasks(describeBundleTasks); - } - - public DescribeCustomerGatewaysResponse describeCustomerGateways( - DescribeCustomerGateways describeCustomerGateways) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.describeCustomerGateways(describeCustomerGateways); - } - - public DescribeDhcpOptionsResponse describeDhcpOptions( - DescribeDhcpOptions describeDhcpOptions) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.describeDhcpOptions(describeDhcpOptions); - } - - public DescribeImageAttributeResponse describeImageAttribute( - DescribeImageAttribute describeImageAttribute) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.describeImageAttribute(describeImageAttribute); - } - - public DescribeImagesResponse describeImages( - DescribeImages describeImages) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.describeImages(describeImages); - } - - public DescribeInstanceAttributeResponse describeInstanceAttribute( - DescribeInstanceAttribute describeInstanceAttribute) { - 
AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.describeInstanceAttribute(describeInstanceAttribute); - } - - public DescribeInstancesResponse describeInstances( - DescribeInstances describeInstances) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.describeInstances(describeInstances); - } - - public DescribeKeyPairsResponse describeKeyPairs( - DescribeKeyPairs describeKeyPairs) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.describeKeyPairs(describeKeyPairs); - } - - public DescribeRegionsResponse describeRegions( - DescribeRegions describeRegions) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.describeRegions(describeRegions); - } - - public DescribeReservedInstancesResponse describeReservedInstances( - DescribeReservedInstances describeReservedInstances) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.describeReservedInstances(describeReservedInstances); - } - - public DescribeReservedInstancesOfferingsResponse describeReservedInstancesOfferings( - DescribeReservedInstancesOfferings describeReservedInstancesOfferings) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.describeReservedInstancesOfferings(describeReservedInstancesOfferings); - } - - public DescribeSecurityGroupsResponse describeSecurityGroups( - DescribeSecurityGroups describeSecurityGroups) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return 
ec2Service.describeSecurityGroups(describeSecurityGroups); - } - - public DescribeSnapshotAttributeResponse describeSnapshotAttribute( - DescribeSnapshotAttribute describeSnapshotAttribute) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.describeSnapshotAttribute(describeSnapshotAttribute); - } - - public DescribeSnapshotsResponse describeSnapshots( - DescribeSnapshots describeSnapshots) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.describeSnapshots(describeSnapshots); - } - - public DescribeSpotDatafeedSubscriptionResponse describeSpotDatafeedSubscription( - DescribeSpotDatafeedSubscription describeSpotDatafeedSubscription) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.describeSpotDatafeedSubscription(describeSpotDatafeedSubscription); - } - - public DescribeSpotInstanceRequestsResponse describeSpotInstanceRequests( - DescribeSpotInstanceRequests describeSpotInstanceRequests) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.describeSpotInstanceRequests(describeSpotInstanceRequests); - } - - public DescribeSpotPriceHistoryResponse describeSpotPriceHistory( - DescribeSpotPriceHistory describeSpotPriceHistory) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.describeSpotPriceHistory(describeSpotPriceHistory); - } - - public DescribeSubnetsResponse describeSubnets( - DescribeSubnets describeSubnets) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.describeSubnets(describeSubnets); - } - - public 
DescribeVolumesResponse describeVolumes( - DescribeVolumes describeVolumes) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.describeVolumes(describeVolumes); - } - - public DescribeVpcsResponse describeVpcs(DescribeVpcs describeVpcs) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.describeVpcs(describeVpcs); - } - - public DescribeVpnConnectionsResponse describeVpnConnections( - DescribeVpnConnections describeVpnConnections) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.describeVpnConnections(describeVpnConnections); - } - - public DescribeVpnGatewaysResponse describeVpnGateways( - DescribeVpnGateways describeVpnGateways) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.describeVpnGateways(describeVpnGateways); - } - - public DetachVolumeResponse detachVolume(DetachVolume detachVolume) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.detachVolume(detachVolume); - } - - public DetachVpnGatewayResponse detachVpnGateway( - DetachVpnGateway detachVpnGateway) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.detachVpnGateway(detachVpnGateway); - } - - public DisassociateAddressResponse disassociateAddress( - DisassociateAddress disassociateAddress) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.disassociateAddress(disassociateAddress); - } - - public GetConsoleOutputResponse getConsoleOutput( - GetConsoleOutput 
getConsoleOutput) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.getConsoleOutput(getConsoleOutput); - } - - public GetPasswordDataResponse getPasswordData( - GetPasswordData getPasswordData) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.getPasswordData(getPasswordData); - } - - public ModifyImageAttributeResponse modifyImageAttribute( - ModifyImageAttribute modifyImageAttribute) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.modifyImageAttribute(modifyImageAttribute); - } - - public ModifyInstanceAttributeResponse modifyInstanceAttribute( - ModifyInstanceAttribute modifyInstanceAttribute) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.modifyInstanceAttribute(modifyInstanceAttribute); - } - - public ModifySnapshotAttributeResponse modifySnapshotAttribute( - ModifySnapshotAttribute modifySnapshotAttribute) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.modifySnapshotAttribute(modifySnapshotAttribute); - } - - public MonitorInstancesResponse monitorInstances( - MonitorInstances monitorInstances) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.monitorInstances(monitorInstances); - } - - public PurchaseReservedInstancesOfferingResponse purchaseReservedInstancesOffering( - PurchaseReservedInstancesOffering purchaseReservedInstancesOffering) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return 
ec2Service.purchaseReservedInstancesOffering(purchaseReservedInstancesOffering); - } - - public RebootInstancesResponse rebootInstances( - RebootInstances rebootInstances) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.rebootInstances(rebootInstances); - } - - public RegisterImageResponse registerImage(RegisterImage registerImage) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.registerImage(registerImage); - } - - public ReleaseAddressResponse releaseAddress( - ReleaseAddress releaseAddress) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.releaseAddress(releaseAddress); - } - - public RequestSpotInstancesResponse requestSpotInstances( - RequestSpotInstances requestSpotInstances) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.requestSpotInstances(requestSpotInstances); - } - - public ResetImageAttributeResponse resetImageAttribute( - ResetImageAttribute resetImageAttribute) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.resetImageAttribute(resetImageAttribute); - } - - public ResetInstanceAttributeResponse resetInstanceAttribute( - ResetInstanceAttribute resetInstanceAttribute) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.resetInstanceAttribute(resetInstanceAttribute); - } - - public ResetSnapshotAttributeResponse resetSnapshotAttribute( - ResetSnapshotAttribute resetSnapshotAttribute) { - AmazonEC2SkeletonInterface ec2Service = 
ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.resetSnapshotAttribute(resetSnapshotAttribute); - } - - public RevokeSecurityGroupIngressResponse revokeSecurityGroupIngress( - RevokeSecurityGroupIngress revokeSecurityGroupIngress) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.revokeSecurityGroupIngress(revokeSecurityGroupIngress); - } - - public RunInstancesResponse runInstances(RunInstances runInstances) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.runInstances(runInstances); - } - - public StartInstancesResponse startInstances( - StartInstances startInstances) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.startInstances(startInstances); - } - - public StopInstancesResponse stopInstances(StopInstances stopInstances) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.stopInstances(stopInstances); - } - - public TerminateInstancesResponse terminateInstances( - TerminateInstances terminateInstances) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.terminateInstances(terminateInstances); - } - - public UnmonitorInstancesResponse unmonitorInstances( - UnmonitorInstances unmonitorInstances) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.unmonitorInstances(unmonitorInstances); - } - - public ActivateLicenseResponse activateLicense(ActivateLicense activateLicense) { - AmazonEC2SkeletonInterface ec2Service = 
ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.activateLicense(activateLicense); - } - - public CreatePlacementGroupResponse createPlacementGroup(CreatePlacementGroup createPlacementGroup) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.createPlacementGroup(createPlacementGroup); - } - - public DeactivateLicenseResponse deactivateLicense(DeactivateLicense deactivateLicense) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.deactivateLicense(deactivateLicense); - } - - public DeletePlacementGroupResponse deletePlacementGroup(DeletePlacementGroup deletePlacementGroup) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.deletePlacementGroup(deletePlacementGroup); - } - - public DescribeLicensesResponse describeLicenses(DescribeLicenses describeLicenses) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.describeLicenses(describeLicenses); - } - - public DescribePlacementGroupsResponse describePlacementGroups(DescribePlacementGroups describePlacementGroups) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.describePlacementGroups(describePlacementGroups); - } - - public DescribeTagsResponse describeTags(DescribeTags describeTags) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.describeTags(describeTags); - } - - public CreateTagsResponse createTags(CreateTags createTags) { - AmazonEC2SkeletonInterface ec2Service = 
ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.createTags(createTags); - } - - public DeleteTagsResponse deleteTags(DeleteTags deleteTags) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.deleteTags(deleteTags); - } - - public ImportKeyPairResponse importKeyPair(ImportKeyPair importKeyPair) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.importKeyPair(importKeyPair); - } - - @Override - public CancelConversionTaskResponse cancelConversionTask(CancelConversionTask cancelConversionTask) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.cancelConversionTask(cancelConversionTask); - } - - @Override - public DescribeConversionTasksResponse describeConversionTasks(DescribeConversionTasks describeConversionTasks) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.describeConversionTasks(describeConversionTasks); - } - - @Override - public ImportInstanceResponse importInstance(ImportInstance importInstance) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.importInstance(importInstance); - } - - @Override - public ImportVolumeResponse importVolume(ImportVolume importVolume) { - AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); - return ec2Service.importVolume(importVolume); - } -} +/* + * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.cloud.bridge.service; + +import org.apache.log4j.Logger; + +import com.amazon.ec2.*; + +public class EC2SoapService implements AmazonEC2SkeletonInterface { + protected final static Logger logger = Logger.getLogger(EC2SoapService.class); + + public AllocateAddressResponse allocateAddress( + AllocateAddress allocateAddress) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.allocateAddress(allocateAddress); + } + + public AssociateAddressResponse associateAddress( + AssociateAddress associateAddress) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.associateAddress(associateAddress); + } + + public AssociateDhcpOptionsResponse associateDhcpOptions( + AssociateDhcpOptions associateDhcpOptions) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.associateDhcpOptions(associateDhcpOptions); + } + + public AttachVolumeResponse attachVolume(AttachVolume attachVolume) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.attachVolume(attachVolume); + } + + public AttachVpnGatewayResponse attachVpnGateway( + AttachVpnGateway attachVpnGateway) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return 
ec2Service.attachVpnGateway(attachVpnGateway); + } + + public AuthorizeSecurityGroupIngressResponse authorizeSecurityGroupIngress( + AuthorizeSecurityGroupIngress authorizeSecurityGroupIngress) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.authorizeSecurityGroupIngress(authorizeSecurityGroupIngress); + } + + public BundleInstanceResponse bundleInstance( + BundleInstance bundleInstance) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.bundleInstance(bundleInstance); + } + + public CancelBundleTaskResponse cancelBundleTask( + CancelBundleTask cancelBundleTask) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.cancelBundleTask(cancelBundleTask); + } + + public CancelSpotInstanceRequestsResponse cancelSpotInstanceRequests( + CancelSpotInstanceRequests cancelSpotInstanceRequests) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.cancelSpotInstanceRequests(cancelSpotInstanceRequests); + } + + public ConfirmProductInstanceResponse confirmProductInstance( + ConfirmProductInstance confirmProductInstance) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.confirmProductInstance(confirmProductInstance); + } + + public CreateCustomerGatewayResponse createCustomerGateway( + CreateCustomerGateway createCustomerGateway) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.createCustomerGateway(createCustomerGateway); + } + + public CreateDhcpOptionsResponse createDhcpOptions( + CreateDhcpOptions createDhcpOptions) { + 
AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.createDhcpOptions(createDhcpOptions); + } + + public CreateImageResponse createImage(CreateImage createImage) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.createImage(createImage); + } + + public CreateKeyPairResponse createKeyPair(CreateKeyPair createKeyPair) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.createKeyPair(createKeyPair); + } + + public CreateSecurityGroupResponse createSecurityGroup( + CreateSecurityGroup createSecurityGroup) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.createSecurityGroup(createSecurityGroup); + } + + public CreateSnapshotResponse createSnapshot( + CreateSnapshot createSnapshot) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.createSnapshot(createSnapshot); + } + + public CreateSpotDatafeedSubscriptionResponse createSpotDatafeedSubscription( + CreateSpotDatafeedSubscription createSpotDatafeedSubscription) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.createSpotDatafeedSubscription(createSpotDatafeedSubscription); + } + + public CreateSubnetResponse createSubnet(CreateSubnet createSubnet) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.createSubnet(createSubnet); + } + + public CreateVolumeResponse createVolume(CreateVolume createVolume) { + AmazonEC2SkeletonInterface ec2Service = 
ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.createVolume(createVolume); + } + + public CreateVpcResponse createVpc(CreateVpc createVpc) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.createVpc(createVpc); + } + + public CreateVpnConnectionResponse createVpnConnection( + CreateVpnConnection createVpnConnection) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.createVpnConnection(createVpnConnection); + } + + public CreateVpnGatewayResponse createVpnGateway( + CreateVpnGateway createVpnGateway) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.createVpnGateway(createVpnGateway); + } + + public DeleteCustomerGatewayResponse deleteCustomerGateway( + DeleteCustomerGateway deleteCustomerGateway) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.deleteCustomerGateway(deleteCustomerGateway); + } + + public DeleteDhcpOptionsResponse deleteDhcpOptions( + DeleteDhcpOptions deleteDhcpOptions) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.deleteDhcpOptions(deleteDhcpOptions); + } + + public DeleteKeyPairResponse deleteKeyPair(DeleteKeyPair deleteKeyPair) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.deleteKeyPair(deleteKeyPair); + } + + public DeleteSecurityGroupResponse deleteSecurityGroup( + DeleteSecurityGroup deleteSecurityGroup) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + 
return ec2Service.deleteSecurityGroup(deleteSecurityGroup); + } + + public DeleteSnapshotResponse deleteSnapshot( + DeleteSnapshot deleteSnapshot) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.deleteSnapshot(deleteSnapshot); + } + + public DeleteSpotDatafeedSubscriptionResponse deleteSpotDatafeedSubscription( + DeleteSpotDatafeedSubscription deleteSpotDatafeedSubscription) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.deleteSpotDatafeedSubscription(deleteSpotDatafeedSubscription); + } + + public DeleteSubnetResponse deleteSubnet(DeleteSubnet deleteSubnet) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.deleteSubnet(deleteSubnet); + } + + public DeleteVolumeResponse deleteVolume(DeleteVolume deleteVolume) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.deleteVolume(deleteVolume); + } + + public DeleteVpcResponse deleteVpc(DeleteVpc deleteVpc) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.deleteVpc(deleteVpc); + } + + public DeleteVpnConnectionResponse deleteVpnConnection( + DeleteVpnConnection deleteVpnConnection) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.deleteVpnConnection(deleteVpnConnection); + } + + public DeleteVpnGatewayResponse deleteVpnGateway( + DeleteVpnGateway deleteVpnGateway) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.deleteVpnGateway(deleteVpnGateway); + } + + public 
DeregisterImageResponse deregisterImage( + DeregisterImage deregisterImage) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.deregisterImage(deregisterImage); + } + + public DescribeAddressesResponse describeAddresses( + DescribeAddresses describeAddresses) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.describeAddresses(describeAddresses); + } + + public DescribeAvailabilityZonesResponse describeAvailabilityZones( + DescribeAvailabilityZones describeAvailabilityZones) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.describeAvailabilityZones(describeAvailabilityZones); + } + + public DescribeBundleTasksResponse describeBundleTasks( + DescribeBundleTasks describeBundleTasks) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.describeBundleTasks(describeBundleTasks); + } + + public DescribeCustomerGatewaysResponse describeCustomerGateways( + DescribeCustomerGateways describeCustomerGateways) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.describeCustomerGateways(describeCustomerGateways); + } + + public DescribeDhcpOptionsResponse describeDhcpOptions( + DescribeDhcpOptions describeDhcpOptions) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.describeDhcpOptions(describeDhcpOptions); + } + + public DescribeImageAttributeResponse describeImageAttribute( + DescribeImageAttribute describeImageAttribute) { + AmazonEC2SkeletonInterface ec2Service = 
ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.describeImageAttribute(describeImageAttribute); + } + + public DescribeImagesResponse describeImages( + DescribeImages describeImages) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.describeImages(describeImages); + } + + public DescribeInstanceAttributeResponse describeInstanceAttribute( + DescribeInstanceAttribute describeInstanceAttribute) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.describeInstanceAttribute(describeInstanceAttribute); + } + + public DescribeInstancesResponse describeInstances( + DescribeInstances describeInstances) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.describeInstances(describeInstances); + } + + public DescribeKeyPairsResponse describeKeyPairs( + DescribeKeyPairs describeKeyPairs) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.describeKeyPairs(describeKeyPairs); + } + + public DescribeRegionsResponse describeRegions( + DescribeRegions describeRegions) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.describeRegions(describeRegions); + } + + public DescribeReservedInstancesResponse describeReservedInstances( + DescribeReservedInstances describeReservedInstances) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.describeReservedInstances(describeReservedInstances); + } + + public DescribeReservedInstancesOfferingsResponse describeReservedInstancesOfferings( + 
DescribeReservedInstancesOfferings describeReservedInstancesOfferings) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.describeReservedInstancesOfferings(describeReservedInstancesOfferings); + } + + public DescribeSecurityGroupsResponse describeSecurityGroups( + DescribeSecurityGroups describeSecurityGroups) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.describeSecurityGroups(describeSecurityGroups); + } + + public DescribeSnapshotAttributeResponse describeSnapshotAttribute( + DescribeSnapshotAttribute describeSnapshotAttribute) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.describeSnapshotAttribute(describeSnapshotAttribute); + } + + public DescribeSnapshotsResponse describeSnapshots( + DescribeSnapshots describeSnapshots) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.describeSnapshots(describeSnapshots); + } + + public DescribeSpotDatafeedSubscriptionResponse describeSpotDatafeedSubscription( + DescribeSpotDatafeedSubscription describeSpotDatafeedSubscription) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.describeSpotDatafeedSubscription(describeSpotDatafeedSubscription); + } + + public DescribeSpotInstanceRequestsResponse describeSpotInstanceRequests( + DescribeSpotInstanceRequests describeSpotInstanceRequests) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.describeSpotInstanceRequests(describeSpotInstanceRequests); + } + + public DescribeSpotPriceHistoryResponse describeSpotPriceHistory( 
+ DescribeSpotPriceHistory describeSpotPriceHistory) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.describeSpotPriceHistory(describeSpotPriceHistory); + } + + public DescribeSubnetsResponse describeSubnets( + DescribeSubnets describeSubnets) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.describeSubnets(describeSubnets); + } + + public DescribeVolumesResponse describeVolumes( + DescribeVolumes describeVolumes) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.describeVolumes(describeVolumes); + } + + public DescribeVpcsResponse describeVpcs(DescribeVpcs describeVpcs) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.describeVpcs(describeVpcs); + } + + public DescribeVpnConnectionsResponse describeVpnConnections( + DescribeVpnConnections describeVpnConnections) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.describeVpnConnections(describeVpnConnections); + } + + public DescribeVpnGatewaysResponse describeVpnGateways( + DescribeVpnGateways describeVpnGateways) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.describeVpnGateways(describeVpnGateways); + } + + public DetachVolumeResponse detachVolume(DetachVolume detachVolume) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.detachVolume(detachVolume); + } + + public DetachVpnGatewayResponse detachVpnGateway( + DetachVpnGateway detachVpnGateway) { + 
AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.detachVpnGateway(detachVpnGateway); + } + + public DisassociateAddressResponse disassociateAddress( + DisassociateAddress disassociateAddress) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.disassociateAddress(disassociateAddress); + } + + public GetConsoleOutputResponse getConsoleOutput( + GetConsoleOutput getConsoleOutput) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.getConsoleOutput(getConsoleOutput); + } + + public GetPasswordDataResponse getPasswordData( + GetPasswordData getPasswordData) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.getPasswordData(getPasswordData); + } + + public ModifyImageAttributeResponse modifyImageAttribute( + ModifyImageAttribute modifyImageAttribute) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.modifyImageAttribute(modifyImageAttribute); + } + + public ModifyInstanceAttributeResponse modifyInstanceAttribute( + ModifyInstanceAttribute modifyInstanceAttribute) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.modifyInstanceAttribute(modifyInstanceAttribute); + } + + public ModifySnapshotAttributeResponse modifySnapshotAttribute( + ModifySnapshotAttribute modifySnapshotAttribute) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.modifySnapshotAttribute(modifySnapshotAttribute); + } + + public MonitorInstancesResponse 
monitorInstances( + MonitorInstances monitorInstances) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.monitorInstances(monitorInstances); + } + + public PurchaseReservedInstancesOfferingResponse purchaseReservedInstancesOffering( + PurchaseReservedInstancesOffering purchaseReservedInstancesOffering) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.purchaseReservedInstancesOffering(purchaseReservedInstancesOffering); + } + + public RebootInstancesResponse rebootInstances( + RebootInstances rebootInstances) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.rebootInstances(rebootInstances); + } + + public RegisterImageResponse registerImage(RegisterImage registerImage) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.registerImage(registerImage); + } + + public ReleaseAddressResponse releaseAddress( + ReleaseAddress releaseAddress) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.releaseAddress(releaseAddress); + } + + public RequestSpotInstancesResponse requestSpotInstances( + RequestSpotInstances requestSpotInstances) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.requestSpotInstances(requestSpotInstances); + } + + public ResetImageAttributeResponse resetImageAttribute( + ResetImageAttribute resetImageAttribute) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.resetImageAttribute(resetImageAttribute); + } + + 
public ResetInstanceAttributeResponse resetInstanceAttribute( + ResetInstanceAttribute resetInstanceAttribute) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.resetInstanceAttribute(resetInstanceAttribute); + } + + public ResetSnapshotAttributeResponse resetSnapshotAttribute( + ResetSnapshotAttribute resetSnapshotAttribute) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.resetSnapshotAttribute(resetSnapshotAttribute); + } + + public RevokeSecurityGroupIngressResponse revokeSecurityGroupIngress( + RevokeSecurityGroupIngress revokeSecurityGroupIngress) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.revokeSecurityGroupIngress(revokeSecurityGroupIngress); + } + + public RunInstancesResponse runInstances(RunInstances runInstances) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.runInstances(runInstances); + } + + public StartInstancesResponse startInstances( + StartInstances startInstances) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.startInstances(startInstances); + } + + public StopInstancesResponse stopInstances(StopInstances stopInstances) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.stopInstances(stopInstances); + } + + public TerminateInstancesResponse terminateInstances( + TerminateInstances terminateInstances) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return 
ec2Service.terminateInstances(terminateInstances); + } + + public UnmonitorInstancesResponse unmonitorInstances( + UnmonitorInstances unmonitorInstances) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.unmonitorInstances(unmonitorInstances); + } + + public ActivateLicenseResponse activateLicense(ActivateLicense activateLicense) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.activateLicense(activateLicense); + } + + public CreatePlacementGroupResponse createPlacementGroup(CreatePlacementGroup createPlacementGroup) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.createPlacementGroup(createPlacementGroup); + } + + public DeactivateLicenseResponse deactivateLicense(DeactivateLicense deactivateLicense) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.deactivateLicense(deactivateLicense); + } + + public DeletePlacementGroupResponse deletePlacementGroup(DeletePlacementGroup deletePlacementGroup) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.deletePlacementGroup(deletePlacementGroup); + } + + public DescribeLicensesResponse describeLicenses(DescribeLicenses describeLicenses) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.describeLicenses(describeLicenses); + } + + public DescribePlacementGroupsResponse describePlacementGroups(DescribePlacementGroups describePlacementGroups) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return 
ec2Service.describePlacementGroups(describePlacementGroups); + } + + public DescribeTagsResponse describeTags(DescribeTags describeTags) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.describeTags(describeTags); + } + + public CreateTagsResponse createTags(CreateTags createTags) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.createTags(createTags); + } + + public DeleteTagsResponse deleteTags(DeleteTags deleteTags) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.deleteTags(deleteTags); + } + + public ImportKeyPairResponse importKeyPair(ImportKeyPair importKeyPair) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.importKeyPair(importKeyPair); + } + + @Override + public CancelConversionTaskResponse cancelConversionTask(CancelConversionTask cancelConversionTask) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.cancelConversionTask(cancelConversionTask); + } + + @Override + public DescribeConversionTasksResponse describeConversionTasks(DescribeConversionTasks describeConversionTasks) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.describeConversionTasks(describeConversionTasks); + } + + @Override + public ImportInstanceResponse importInstance(ImportInstance importInstance) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.importInstance(importInstance); + } + + @Override + public ImportVolumeResponse 
importVolume(ImportVolume importVolume) { + AmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class); + return ec2Service.importVolume(importVolume); + } +} \ No newline at end of file diff --git a/awsapi/src/com/cloud/bridge/service/EC2SoapServiceImpl.java b/awsapi/src/com/cloud/bridge/service/EC2SoapServiceImpl.java index 2ac3ffc9581..34d3b80153f 100644 --- a/awsapi/src/com/cloud/bridge/service/EC2SoapServiceImpl.java +++ b/awsapi/src/com/cloud/bridge/service/EC2SoapServiceImpl.java @@ -1,2184 +1,2165 @@ -/* - * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.cloud.bridge.service; - -import java.util.ArrayList; -import java.util.Calendar; -import java.util.List; -import java.util.UUID; - -import org.apache.commons.codec.binary.Base64; - -import com.amazon.ec2.*; -import com.cloud.bridge.service.core.ec2.EC2Address; -import com.cloud.bridge.service.core.ec2.EC2AddressFilterSet; -import com.cloud.bridge.service.core.ec2.EC2AssociateAddress; -import com.cloud.bridge.service.core.ec2.EC2AuthorizeRevokeSecurityGroup; -import com.cloud.bridge.service.core.ec2.EC2CreateImage; -import com.cloud.bridge.service.core.ec2.EC2CreateImageResponse; -import com.cloud.bridge.service.core.ec2.EC2CreateKeyPair; -import com.cloud.bridge.service.core.ec2.EC2CreateVolume; -import com.cloud.bridge.service.core.ec2.EC2DeleteKeyPair; -import com.cloud.bridge.service.core.ec2.EC2DescribeAddresses; -import com.cloud.bridge.service.core.ec2.EC2DescribeAddressesResponse; -import com.cloud.bridge.service.core.ec2.EC2DescribeAvailabilityZones; -import com.cloud.bridge.service.core.ec2.EC2DescribeAvailabilityZonesResponse; -import com.cloud.bridge.service.core.ec2.EC2DescribeImages; -import com.cloud.bridge.service.core.ec2.EC2DescribeImagesResponse; -import com.cloud.bridge.service.core.ec2.EC2DescribeInstances; -import com.cloud.bridge.service.core.ec2.EC2DescribeInstancesResponse; -import com.cloud.bridge.service.core.ec2.EC2DescribeKeyPairs; -import com.cloud.bridge.service.core.ec2.EC2DescribeKeyPairsResponse; -import com.cloud.bridge.service.core.ec2.EC2DescribeSecurityGroups; -import com.cloud.bridge.service.core.ec2.EC2DescribeSecurityGroupsResponse; -import com.cloud.bridge.service.core.ec2.EC2DescribeSnapshots; -import com.cloud.bridge.service.core.ec2.EC2DescribeSnapshotsResponse; -import com.cloud.bridge.service.core.ec2.EC2DescribeVolumes; -import com.cloud.bridge.service.core.ec2.EC2DescribeVolumesResponse; -import com.cloud.bridge.service.core.ec2.EC2DisassociateAddress; -import 
com.cloud.bridge.service.core.ec2.EC2Engine; -import com.cloud.bridge.service.core.ec2.EC2Filter; -import com.cloud.bridge.service.core.ec2.EC2GroupFilterSet; -import com.cloud.bridge.service.core.ec2.EC2Image; -import com.cloud.bridge.service.core.ec2.EC2ImportKeyPair; -import com.cloud.bridge.service.core.ec2.EC2Instance; -import com.cloud.bridge.service.core.ec2.EC2InstanceFilterSet; -import com.cloud.bridge.service.core.ec2.EC2IpPermission; -import com.cloud.bridge.service.core.ec2.EC2KeyPairFilterSet; -import com.cloud.bridge.service.core.ec2.EC2PasswordData; -import com.cloud.bridge.service.core.ec2.EC2RebootInstances; -import com.cloud.bridge.service.core.ec2.EC2RegisterImage; -import com.cloud.bridge.service.core.ec2.EC2ReleaseAddress; -import com.cloud.bridge.service.core.ec2.EC2RunInstances; -import com.cloud.bridge.service.core.ec2.EC2RunInstancesResponse; -import com.cloud.bridge.service.core.ec2.EC2SSHKeyPair; -import com.cloud.bridge.service.core.ec2.EC2SecurityGroup; -import com.cloud.bridge.service.core.ec2.EC2Snapshot; -import com.cloud.bridge.service.core.ec2.EC2SnapshotFilterSet; -import com.cloud.bridge.service.core.ec2.EC2StartInstances; -import com.cloud.bridge.service.core.ec2.EC2StartInstancesResponse; -import com.cloud.bridge.service.core.ec2.EC2StopInstances; -import com.cloud.bridge.service.core.ec2.EC2StopInstancesResponse; -import com.cloud.bridge.service.core.ec2.EC2Volume; -import com.cloud.bridge.service.core.ec2.EC2VolumeFilterSet; -import com.cloud.bridge.service.exception.EC2ServiceException; -import com.cloud.bridge.service.exception.EC2ServiceException.ClientError; -import com.cloud.bridge.util.EC2RestAuth; - - -public class EC2SoapServiceImpl implements AmazonEC2SkeletonInterface { - - private static EC2Engine engine; - - @SuppressWarnings("static-access") - public EC2SoapServiceImpl(EC2Engine engine) { - this.engine = engine; - } - - public AttachVolumeResponse attachVolume(AttachVolume attachVolume) { - EC2Volume request = 
new EC2Volume(); - AttachVolumeType avt = attachVolume.getAttachVolume(); - - request.setId(avt.getVolumeId()); - request.setInstanceId(avt.getInstanceId()); - request.setDevice( avt.getDevice()); - return toAttachVolumeResponse( engine.attachVolume( request )); - } - - public AuthorizeSecurityGroupIngressResponse authorizeSecurityGroupIngress(AuthorizeSecurityGroupIngress authorizeSecurityGroupIngress) { - AuthorizeSecurityGroupIngressType sgit = authorizeSecurityGroupIngress.getAuthorizeSecurityGroupIngress(); - IpPermissionSetType ipPerms = sgit.getIpPermissions(); - - EC2AuthorizeRevokeSecurityGroup request = toSecurityGroup( sgit.getGroupName(), ipPerms.getItem()); - return toAuthorizeSecurityGroupIngressResponse( engine.authorizeSecurityGroup( request )); - } - - - public RevokeSecurityGroupIngressResponse revokeSecurityGroupIngress( RevokeSecurityGroupIngress revokeSecurityGroupIngress ) - { - RevokeSecurityGroupIngressType sgit = revokeSecurityGroupIngress.getRevokeSecurityGroupIngress(); - IpPermissionSetType ipPerms = sgit.getIpPermissions(); - - EC2AuthorizeRevokeSecurityGroup request = toSecurityGroup( sgit.getGroupName(), ipPerms.getItem()); - return toRevokeSecurityGroupIngressResponse( engine.revokeSecurityGroup( request )); - } - - - /** - * Authorize and Revoke Security Group Ingress have the same parameters. 
- */ - private EC2AuthorizeRevokeSecurityGroup toSecurityGroup( String groupName, IpPermissionType[] items ) { - EC2AuthorizeRevokeSecurityGroup request = new EC2AuthorizeRevokeSecurityGroup(); - - request.setName( groupName ); - - for (IpPermissionType ipPerm : items) { - EC2IpPermission perm = new EC2IpPermission(); - perm.setProtocol( ipPerm.getIpProtocol()); - perm.setFromPort( ipPerm.getFromPort()); - perm.setToPort( ipPerm.getToPort()); - - UserIdGroupPairSetType groups = ipPerm.getGroups(); - if (null != groups && groups.getItem() != null) { - UserIdGroupPairType[] groupItems = groups.getItem(); - for (UserIdGroupPairType groupPair : groupItems) { - EC2SecurityGroup user = new EC2SecurityGroup(); - user.setName( groupPair.getGroupName()); - user.setAccount( groupPair.getUserId()); - perm.addUser( user ); - } - } - - IpRangeSetType ranges = ipPerm.getIpRanges(); - if (ranges != null && ranges.getItem() != null) { - IpRangeItemType[] rangeItems = ranges.getItem(); - for (IpRangeItemType ipRange: rangeItems) - perm.addIpRange( ipRange.getCidrIp() ); - } - - request.addIpPermission( perm ); - } - return request; - } - - public CreateImageResponse createImage(CreateImage createImage) { - EC2CreateImage request = new EC2CreateImage(); - CreateImageType cit = createImage.getCreateImage(); - - request.setInstanceId( cit.getInstanceId()); - request.setName( cit.getName()); - request.setDescription( cit.getDescription()); - return toCreateImageResponse( engine.createImage(request)); - } - - public CreateSecurityGroupResponse createSecurityGroup(CreateSecurityGroup createSecurityGroup) { - CreateSecurityGroupType sgt = createSecurityGroup.getCreateSecurityGroup(); - - return toCreateSecurityGroupResponse( engine.createSecurityGroup(sgt.getGroupName(), sgt.getGroupDescription())); - } - - public CreateSnapshotResponse createSnapshot(CreateSnapshot createSnapshot) { - CreateSnapshotType cst = createSnapshot.getCreateSnapshot(); - return toCreateSnapshotResponse( 
engine.createSnapshot( cst.getVolumeId()), engine); - } - - public CreateVolumeResponse createVolume(CreateVolume createVolume) { - EC2CreateVolume request = new EC2CreateVolume(); - CreateVolumeType cvt = createVolume.getCreateVolume(); - - request.setSize( cvt.getSize()); - request.setSnapshotId(cvt.getSnapshotId() != null ? cvt.getSnapshotId() : null); - request.setZoneName( cvt.getAvailabilityZone()); - return toCreateVolumeResponse( engine.createVolume( request )); - } - - public DeleteSecurityGroupResponse deleteSecurityGroup(DeleteSecurityGroup deleteSecurityGroup) { - DeleteSecurityGroupType sgt = deleteSecurityGroup.getDeleteSecurityGroup(); - return toDeleteSecurityGroupResponse( engine.deleteSecurityGroup( sgt.getGroupName())); - } - - public DeleteSnapshotResponse deleteSnapshot(DeleteSnapshot deleteSnapshot) { - DeleteSnapshotType dst = deleteSnapshot.getDeleteSnapshot(); - return toDeleteSnapshotResponse( engine.deleteSnapshot( dst.getSnapshotId())); - } - - public DeleteVolumeResponse deleteVolume(DeleteVolume deleteVolume) { - EC2Volume request = new EC2Volume(); - DeleteVolumeType avt = deleteVolume.getDeleteVolume(); - - request.setId(avt.getVolumeId()); - return toDeleteVolumeResponse( engine.deleteVolume( request )); - } - - public DeregisterImageResponse deregisterImage(DeregisterImage deregisterImage) { - DeregisterImageType dit = deregisterImage.getDeregisterImage(); - EC2Image image = new EC2Image(); - - image.setId( dit.getImageId()); - return toDeregisterImageResponse( engine.deregisterImage( image )); - } - - public DescribeAvailabilityZonesResponse describeAvailabilityZones(DescribeAvailabilityZones describeAvailabilityZones) { - EC2DescribeAvailabilityZones request = new EC2DescribeAvailabilityZones(); - - DescribeAvailabilityZonesType dazt = describeAvailabilityZones.getDescribeAvailabilityZones(); - DescribeAvailabilityZonesSetType dazs = dazt.getAvailabilityZoneSet(); - DescribeAvailabilityZonesSetItemType[] items = dazs.getItem(); - 
if (null != items) { // -> can be empty - for( int i=0; i < items.length; i++ ) request.addZone( items[i].getZoneName()); - } - return toDescribeAvailabilityZonesResponse( engine.handleRequest( request )); - } - - /** - * This only supports a query about description. - */ - public DescribeImageAttributeResponse describeImageAttribute(DescribeImageAttribute describeImageAttribute) { - EC2DescribeImages request = new EC2DescribeImages(); - DescribeImageAttributeType diat = describeImageAttribute.getDescribeImageAttribute(); - DescribeImageAttributesGroup diag = diat.getDescribeImageAttributesGroup(); - EmptyElementType description = diag.getDescription(); - - if ( null != description ) { - request.addImageSet(diat.getImageId()); - return toDescribeImageAttributeResponse( engine.describeImages( request )); - } - else throw new EC2ServiceException( "Unsupported - only description supported", 501 ); - } - - public DescribeImagesResponse describeImages(DescribeImages describeImages) { - EC2DescribeImages request = new EC2DescribeImages(); - DescribeImagesType dit = describeImages.getDescribeImages(); - - // -> toEC2DescribeImages - DescribeImagesExecutableBySetType param1 = dit.getExecutableBySet(); - if (null != param1) { - DescribeImagesExecutableByType[] items1 = param1.getItem(); - if (null != items1) { - for( int i=0; i < items1.length; i++ ) request.addExecutableBySet( items1[i].getUser()); - } - } - DescribeImagesInfoType param2 = dit.getImagesSet(); - if (null != param2) { - DescribeImagesItemType[] items2 = param2.getItem(); - if (null != items2) { - for( int i=0; i < items2.length; i++ ) request.addImageSet( items2[i].getImageId()); - } - } - DescribeImagesOwnersType param3 = dit.getOwnersSet(); - if (null != param3) { - DescribeImagesOwnerType[] items3 = param3.getItem(); - if (null != items3) { - for( int i=0; i < items3.length; i++ ) request.addOwnersSet( items3[i].getOwner()); - } - } - - return toDescribeImagesResponse( engine.describeImages( request )); - 
} - - public DescribeInstanceAttributeResponse describeInstanceAttribute(DescribeInstanceAttribute describeInstanceAttribute) { - EC2DescribeInstances request = new EC2DescribeInstances(); - DescribeInstanceAttributeType diat = describeInstanceAttribute.getDescribeInstanceAttribute(); - DescribeInstanceAttributesGroup diag = diat.getDescribeInstanceAttributesGroup(); - EmptyElementType instanceType = diag.getInstanceType(); - - // -> toEC2DescribeInstances - if (null != instanceType) { - request.addInstanceId( diat.getInstanceId()); - return toDescribeInstanceAttributeResponse( engine.describeInstances( request )); - } - throw new EC2ServiceException( "Unsupported - only instanceType supported", 501 ); - } - - - public DescribeInstancesResponse describeInstances( DescribeInstances describeInstances ) - { - EC2DescribeInstances request = new EC2DescribeInstances(); - DescribeInstancesType dit = describeInstances.getDescribeInstances(); - FilterSetType fst = dit.getFilterSet(); - - // -> toEC2DescribeInstances - DescribeInstancesInfoType diit = dit.getInstancesSet(); - DescribeInstancesItemType[] items = diit.getItem(); - if (null != items) { // -> can be empty - for( int i=0; i < items.length; i++ ) request.addInstanceId( items[i].getInstanceId()); - } - - if (null != fst) { - request.setFilterSet( toInstanceFilterSet( fst )); - } - - return toDescribeInstancesResponse( engine.describeInstances( request ), engine ); - } - - - @Override - public DescribeAddressesResponse describeAddresses(DescribeAddresses describeAddresses) { - EC2DescribeAddresses ec2Request = new EC2DescribeAddresses(); - DescribeAddressesType dat = describeAddresses.getDescribeAddresses(); - - DescribeAddressesInfoType dait = dat.getPublicIpsSet(); - DescribeAddressesItemType[] items = dait.getItem(); - if (items != null) { // -> can be empty - for (DescribeAddressesItemType itemType : items) - ec2Request.addPublicIp( itemType.getPublicIp()); - } - - FilterSetType fset = dat.getFilterSet(); - if 
(fset != null) { - ec2Request.setFilterSet(toAddressFilterSet(fset)); - } - - return toDescribeAddressesResponse( engine.describeAddresses( ec2Request )); - } - - @Override - public AllocateAddressResponse allocateAddress(AllocateAddress allocateAddress) { - return toAllocateAddressResponse( engine.allocateAddress()); - } - - @Override - public ReleaseAddressResponse releaseAddress(ReleaseAddress releaseAddress) { - EC2ReleaseAddress request = new EC2ReleaseAddress(); - - request.setPublicIp(releaseAddress.getReleaseAddress().getPublicIp()); - - return toReleaseAddressResponse( engine.releaseAddress( request ) ); - } - - @Override - public AssociateAddressResponse associateAddress(AssociateAddress associateAddress) { - EC2AssociateAddress request = new EC2AssociateAddress(); - - request.setPublicIp(associateAddress.getAssociateAddress().getPublicIp()); - request.setInstanceId(associateAddress.getAssociateAddress().getInstanceId()); - - return toAssociateAddressResponse( engine.associateAddress( request ) ); - } - - @Override - public DisassociateAddressResponse disassociateAddress(DisassociateAddress disassociateAddress) { - EC2DisassociateAddress request = new EC2DisassociateAddress(); - - request.setPublicIp(disassociateAddress.getDisassociateAddress().getPublicIp()); - - return toDisassociateAddressResponse( engine.disassociateAddress( request ) ); - } - - public DescribeSecurityGroupsResponse describeSecurityGroups(DescribeSecurityGroups describeSecurityGroups) - { - EC2DescribeSecurityGroups request = new EC2DescribeSecurityGroups(); - - DescribeSecurityGroupsType sgt = describeSecurityGroups.getDescribeSecurityGroups(); - - FilterSetType fst = sgt.getFilterSet(); - - // -> toEC2DescribeSecurityGroups - DescribeSecurityGroupsSetType sgst = sgt.getSecurityGroupSet(); - DescribeSecurityGroupsSetItemType[] items = sgst.getItem(); - if (null != items) { // -> can be empty - for (DescribeSecurityGroupsSetItemType item :items) 
request.addGroupName(item.getGroupName()); - } - - if (null != fst) { - request.setFilterSet( toGroupFilterSet( fst )); - } - - - return toDescribeSecurityGroupsResponse( engine.describeSecurityGroups( request )); - } - - public DescribeSnapshotsResponse describeSnapshots(DescribeSnapshots describeSnapshots) - { - EC2DescribeSnapshots request = new EC2DescribeSnapshots(); - DescribeSnapshotsType dst = describeSnapshots.getDescribeSnapshots(); - - DescribeSnapshotsSetType dsst = dst.getSnapshotSet(); - FilterSetType fst = dst.getFilterSet(); - - if (null != dsst) - { - DescribeSnapshotsSetItemType[] items = dsst.getItem(); - if (null != items) { - for( int i=0; i < items.length; i++ ) request.addSnapshotId( items[i].getSnapshotId()); - } - } - - if (null != fst) - { - String[] timeFilters = new String[1]; - timeFilters[0] = new String( "start-time" ); - request.setFilterSet( toSnapshotFilterSet( fst, timeFilters )); - } - - return toDescribeSnapshotsResponse(engine.handleRequest(request)); - } - - - public DescribeVolumesResponse describeVolumes(DescribeVolumes describeVolumes) - { - EC2DescribeVolumes request = new EC2DescribeVolumes(); - DescribeVolumesType dvt = describeVolumes.getDescribeVolumes(); - - DescribeVolumesSetType dvst = dvt.getVolumeSet(); - FilterSetType fst = dvt.getFilterSet(); - - if (null != dvst) - { - DescribeVolumesSetItemType[] items = dvst.getItem(); - if (null != items) { - for( int i=0; i < items.length; i++ ) request.addVolumeId( items[i].getVolumeId()); - } - } - - if (null != fst) - { - String[] timeFilters = new String[2]; - timeFilters[0] = new String( "attachment.attach-time" ); - timeFilters[1] = new String( "create-time" ); - request.setFilterSet( toVolumeFilterSet( fst, timeFilters )); - } - - return toDescribeVolumesResponse( engine.handleRequest( request )); - } - - public DetachVolumeResponse detachVolume(DetachVolume detachVolume) { - EC2Volume request = new EC2Volume(); - DetachVolumeType avt = 
detachVolume.getDetachVolume(); - - request.setId(avt.getVolumeId()); - request.setInstanceId(avt.getInstanceId()); - request.setDevice( avt.getDevice()); - return toDetachVolumeResponse( engine.detachVolume( request )); - } - - public ModifyImageAttributeResponse modifyImageAttribute(ModifyImageAttribute modifyImageAttribute) { - // TODO: This is broken - EC2Image request = new EC2Image(); - - ModifyImageAttributeType miat = modifyImageAttribute.getModifyImageAttribute(); - ModifyImageAttributeTypeChoice_type0 item = miat.getModifyImageAttributeTypeChoice_type0(); - - AttributeValueType description = item.getDescription(); - /* - LaunchPermissionOperationType launchPermOp = item.getLaunchPermission(); - ProductCodeListType prodCodeList =item.getProductCodes(); - */ - - if (null != description) { - request.setId( miat.getImageId()); - request.setDescription(description.getValue()); - return toModifyImageAttributeResponse( engine.modifyImageAttribute( request )); - } - throw new EC2ServiceException( "Unsupported - can only modify image description", 501 ); - } - - /** - * Did not find a matching service offering so for now we just return disabled - * for each instance request. We could verify that all of the specified instances - * exist to detect an error which would require a listVirtualMachines. 
- */ - public MonitorInstancesResponse monitorInstances(MonitorInstances monitorInstances) { - MonitorInstancesResponse response = new MonitorInstancesResponse(); - MonitorInstancesResponseType param1 = new MonitorInstancesResponseType(); - MonitorInstancesResponseSetType param2 = new MonitorInstancesResponseSetType(); - - MonitorInstancesType mit = monitorInstances.getMonitorInstances(); - MonitorInstancesSetType mist = mit.getInstancesSet(); - MonitorInstancesSetItemType[] misit = mist.getItem(); - - if (null != misit) { - for( int i=0; i < misit.length; i++ ) { - String instanceId = misit[i].getInstanceId(); - MonitorInstancesResponseSetItemType param3 = new MonitorInstancesResponseSetItemType(); - param3.setInstanceId( instanceId ); - InstanceMonitoringStateType param4 = new InstanceMonitoringStateType(); - param4.setState( "disabled" ); - param3.setMonitoring( param4 ); - param2.addItem( param3 ); - } - } - - param1.setRequestId( UUID.randomUUID().toString()); - param1.setInstancesSet( param2 ); - response.setMonitorInstancesResponse( param1 ); - return response; - } - - public RebootInstancesResponse rebootInstances(RebootInstances rebootInstances) { - EC2RebootInstances request = new EC2RebootInstances(); - RebootInstancesType rit = rebootInstances.getRebootInstances(); - - // -> toEC2StartInstances - RebootInstancesInfoType rist = rit.getInstancesSet(); - RebootInstancesItemType[] items = rist.getItem(); - if (null != items) { // -> should not be empty - for( int i=0; i < items.length; i++ ) request.addInstanceId( items[i].getInstanceId()); - } - return toRebootInstancesResponse( engine.rebootInstances( request )); - } - - - /** - * Processes ec2-register - * - * @param - * - * @see RegisterImage - */ - public RegisterImageResponse registerImage(RegisterImage registerImage) { - EC2RegisterImage request = new EC2RegisterImage(); - RegisterImageType rit = registerImage.getRegisterImage(); - - // -> we redefine the architecture field to hold: 
"format:zonename:osTypeName", - // these are the bare minimum that we need to call the cloud registerTemplate call. - request.setLocation( rit.getImageLocation()); // -> should be a URL for us - request.setName( rit.getName()); - request.setDescription( rit.getDescription()); - request.setArchitecture( rit.getArchitecture()); - return toRegisterImageResponse( engine.registerImage( request )); - } - - /** - * Processes ec2-reset-image-attribute - * - * @param resetImageAttribute - * - * @see ResetInstanceAttribute - */ - - public ResetImageAttributeResponse resetImageAttribute(ResetImageAttribute resetImageAttribute) { - EC2Image request = new EC2Image(); - ResetImageAttributeType riat = resetImageAttribute.getResetImageAttribute(); - - request.setId( riat.getImageId()); - request.setDescription( "" ); - return toResetImageAttributeResponse( engine.modifyImageAttribute( request )); - } - - /** - * ec2-run-instances - * - * @param runInstances - * - * @see RunInstances - */ - public RunInstancesResponse runInstances(RunInstances runInstances) { - RunInstancesType rit = runInstances.getRunInstances(); - GroupSetType gst = rit.getGroupSet(); - PlacementRequestType prt = rit.getPlacement(); - UserDataType userData = rit.getUserData(); - String type = rit.getInstanceType(); - String keyName = rit.getKeyName(); - - EC2RunInstances request = new EC2RunInstances(); - - request.setTemplateId(rit.getImageId()); - request.setMinCount(rit.getMinCount()); - request.setMaxCount(rit.getMaxCount()); - if (null != type) request.setInstanceType(type); - if (null != prt) request.setZoneName(prt.getAvailabilityZone()); - if (null != userData) request.setUserData(userData.getData()); - if (null != keyName) request.setKeyName(rit.getKeyName() ); - - // -> we can only support one group per instance - if (null != gst) { - GroupItemType[] items = gst.getItem(); - if (null != items && 0 < items.length) request.setGroupId( items[0].getGroupId()); - } - return toRunInstancesResponse( 
engine.runInstances( request ), engine); - } - - public StartInstancesResponse startInstances(StartInstances startInstances) { - EC2StartInstances request = new EC2StartInstances(); - StartInstancesType sit = startInstances.getStartInstances(); - - // -> toEC2StartInstances - InstanceIdSetType iist = sit.getInstancesSet(); - InstanceIdType[] items = iist.getItem(); - if (null != items) { // -> should not be empty - for( int i=0; i < items.length; i++ ) request.addInstanceId( items[i].getInstanceId()); - } - return toStartInstancesResponse( engine.startInstances( request )); - } - - public StopInstancesResponse stopInstances(StopInstances stopInstances) { - EC2StopInstances request = new EC2StopInstances(); - StopInstancesType sit = stopInstances.getStopInstances(); - - // -> toEC2StopInstances - InstanceIdSetType iist = sit.getInstancesSet(); - InstanceIdType[] items = iist.getItem(); - if (null != items) { // -> should not be empty - for( int i=0; i < items.length; i++ ) request.addInstanceId( items[i].getInstanceId()); - } - return toStopInstancesResponse( engine.stopInstances( request )); - } - - /** - * Mapping this to the destroyVirtualMachine cloud API concept. - * This makes sense since when considering the rebootInstances function. In reboot - * any terminated instances are left alone. We will do the same with destroyed instances. 
- */ - public TerminateInstancesResponse terminateInstances(TerminateInstances terminateInstances) { - EC2StopInstances request = new EC2StopInstances(); - TerminateInstancesType sit = terminateInstances.getTerminateInstances(); - - // -> toEC2StopInstances - InstanceIdSetType iist = sit.getInstancesSet(); - InstanceIdType[] items = iist.getItem(); - if (null != items) { // -> should not be empty - for( int i=0; i < items.length; i++ ) request.addInstanceId( items[i].getInstanceId()); - } - - request.setDestroyInstances( true ); - return toTermInstancesResponse( engine.stopInstances( request )); - } - - /** - * See comment for monitorInstances. - */ - public UnmonitorInstancesResponse unmonitorInstances(UnmonitorInstances unmonitorInstances) { - UnmonitorInstancesResponse response = new UnmonitorInstancesResponse(); - MonitorInstancesResponseType param1 = new MonitorInstancesResponseType(); - MonitorInstancesResponseSetType param2 = new MonitorInstancesResponseSetType(); - - MonitorInstancesType mit = unmonitorInstances.getUnmonitorInstances(); - MonitorInstancesSetType mist = mit.getInstancesSet(); - MonitorInstancesSetItemType[] items = mist.getItem(); - - if (null != items) { - for( int i=0; i < items.length; i++ ) { - String instanceId = items[i].getInstanceId(); - MonitorInstancesResponseSetItemType param3 = new MonitorInstancesResponseSetItemType(); - param3.setInstanceId( instanceId ); - InstanceMonitoringStateType param4 = new InstanceMonitoringStateType(); - param4.setState( "disabled" ); - param3.setMonitoring( param4 ); - param2.addItem( param3 ); - } - } - - param1.setInstancesSet( param2 ); - param1.setRequestId( UUID.randomUUID().toString()); - response.setUnmonitorInstancesResponse( param1 ); - return response; - } - - - public static DescribeImageAttributeResponse toDescribeImageAttributeResponse(EC2DescribeImagesResponse engineResponse) { - DescribeImageAttributeResponse response = new DescribeImageAttributeResponse(); - 
DescribeImageAttributeResponseType param1 = new DescribeImageAttributeResponseType(); - - EC2Image[] imageSet = engineResponse.getImageSet(); - if ( 0 < imageSet.length ) { - DescribeImageAttributeResponseTypeChoice_type0 param2 = new DescribeImageAttributeResponseTypeChoice_type0(); - NullableAttributeValueType param3 = new NullableAttributeValueType(); - param3.setValue( imageSet[0].getDescription()); - param2.setDescription( param3 ); - param1.setDescribeImageAttributeResponseTypeChoice_type0( param2 ); - param1.setImageId( imageSet[0].getId()); - } - - param1.setRequestId( UUID.randomUUID().toString()); - response.setDescribeImageAttributeResponse( param1 ); - return response; - } - - public static ModifyImageAttributeResponse toModifyImageAttributeResponse( boolean engineResponse ) { - ModifyImageAttributeResponse response = new ModifyImageAttributeResponse(); - ModifyImageAttributeResponseType param1 = new ModifyImageAttributeResponseType(); - - param1.set_return( engineResponse ); - param1.setRequestId( UUID.randomUUID().toString()); - response.setModifyImageAttributeResponse( param1 ); - return response; - } - - public static ResetImageAttributeResponse toResetImageAttributeResponse( boolean engineResponse ) { - ResetImageAttributeResponse response = new ResetImageAttributeResponse(); - ResetImageAttributeResponseType param1 = new ResetImageAttributeResponseType(); - - param1.set_return( engineResponse ); - param1.setRequestId( UUID.randomUUID().toString()); - response.setResetImageAttributeResponse( param1 ); - return response; - } - - public static DescribeImagesResponse toDescribeImagesResponse(EC2DescribeImagesResponse engineResponse ) { - DescribeImagesResponse response = new DescribeImagesResponse(); - DescribeImagesResponseType param1 = new DescribeImagesResponseType(); - DescribeImagesResponseInfoType param2 = new DescribeImagesResponseInfoType(); - - EC2Image[] images = engineResponse.getImageSet(); - for( int i=0; i < images.length; i++ ) { - 
String accountName = images[i].getAccountName(); - String domainId = images[i].getDomainId(); - String ownerId = domainId + ":" + accountName; - - DescribeImagesResponseItemType param3 = new DescribeImagesResponseItemType(); - param3.setImageId( images[i].getId()); - param3.setImageLocation( "" ); - param3.setImageState( (images[i].getIsReady() ? "available" : "unavailable" )); - param3.setImageOwnerId(ownerId); - param3.setIsPublic( images[i].getIsPublic()); - - ProductCodesSetType param4 = new ProductCodesSetType(); - ProductCodesSetItemType param5 = new ProductCodesSetItemType(); - param5.setProductCode( "" ); - param4.addItem( param5 ); - param3.setProductCodes( param4 ); - - String description = images[i].getDescription(); - param3.setDescription( (null == description ? "" : description)); - - if (null == description) param3.setArchitecture( "" ); - else if (-1 != description.indexOf( "x86_64" )) param3.setArchitecture( "x86_64" ); - else if (-1 != description.indexOf( "i386" )) param3.setArchitecture( "i386" ); - else param3.setArchitecture( "" ); - - param3.setImageType( "machine" ); - param3.setKernelId( "" ); - param3.setRamdiskId( "" ); - param3.setPlatform( "" ); - - StateReasonType param6 = new StateReasonType(); - param6.setCode( "" ); - param6.setMessage( "" ); - param3.setStateReason( param6 ); - - param3.setImageOwnerAlias( "" ); - param3.setName( images[i].getName()); - param3.setRootDeviceType( "" ); - param3.setRootDeviceName( "" ); - - BlockDeviceMappingType param7 = new BlockDeviceMappingType(); - BlockDeviceMappingItemType param8 = new BlockDeviceMappingItemType(); - BlockDeviceMappingItemTypeChoice_type0 param9 = new BlockDeviceMappingItemTypeChoice_type0(); - param8.setDeviceName( "" ); - param9.setVirtualName( "" ); - EbsBlockDeviceType param10 = new EbsBlockDeviceType(); - param10.setSnapshotId( "" ); - param10.setVolumeSize( 0 ); - param10.setDeleteOnTermination( false ); - param9.setEbs( param10 ); - 
param8.setBlockDeviceMappingItemTypeChoice_type0( param9 ); - param7.addItem( param8 ); - - param3.setBlockDeviceMapping( param7 ); - param2.addItem( param3 ); - } - - param1.setImagesSet( param2 ); - param1.setRequestId( UUID.randomUUID().toString()); - response.setDescribeImagesResponse( param1 ); - return response; - } - - public static CreateImageResponse toCreateImageResponse(EC2CreateImageResponse engineResponse) { - CreateImageResponse response = new CreateImageResponse(); - CreateImageResponseType param1 = new CreateImageResponseType(); - - param1.setImageId( engineResponse.getId()); - param1.setRequestId( UUID.randomUUID().toString()); - response.setCreateImageResponse( param1 ); - return response; - } - - public static RegisterImageResponse toRegisterImageResponse(EC2CreateImageResponse engineResponse) { - RegisterImageResponse response = new RegisterImageResponse(); - RegisterImageResponseType param1 = new RegisterImageResponseType(); - - param1.setImageId( engineResponse.getId()); - param1.setRequestId( UUID.randomUUID().toString()); - response.setRegisterImageResponse( param1 ); - return response; - } - - public static DeregisterImageResponse toDeregisterImageResponse( boolean engineResponse) { - DeregisterImageResponse response = new DeregisterImageResponse(); - DeregisterImageResponseType param1 = new DeregisterImageResponseType(); - - param1.set_return( engineResponse ); - param1.setRequestId( UUID.randomUUID().toString()); - response.setDeregisterImageResponse( param1 ); - return response; - } - - // filtersets - private EC2AddressFilterSet toAddressFilterSet( FilterSetType fst ) { - EC2AddressFilterSet vfs = new EC2AddressFilterSet(); - - FilterType[] items = fst.getItem(); - if (items != null) { - // -> each filter can have one or more values associated with it - for (FilterType item : items) { - EC2Filter oneFilter = new EC2Filter(); - String filterName = item.getName(); - oneFilter.setName( filterName ); - - ValueSetType vst = 
item.getValueSet(); - ValueType[] valueItems = vst.getItem(); - for (ValueType valueItem : valueItems) { - oneFilter.addValueEncoded( valueItem.getValue()); - } - vfs.addFilter( oneFilter ); - } - } - return vfs; - } - - private EC2KeyPairFilterSet toKeyPairFilterSet( FilterSetType fst ) - { - EC2KeyPairFilterSet vfs = new EC2KeyPairFilterSet(); - - FilterType[] items = fst.getItem(); - if (items != null) { - // -> each filter can have one or more values associated with it - for (FilterType item : items) { - EC2Filter oneFilter = new EC2Filter(); - String filterName = item.getName(); - oneFilter.setName( filterName ); - - ValueSetType vst = item.getValueSet(); - ValueType[] valueItems = vst.getItem(); - for (ValueType valueItem : valueItems) { - oneFilter.addValueEncoded( valueItem.getValue()); - } - vfs.addFilter( oneFilter ); - } - } - return vfs; - } - - - private EC2VolumeFilterSet toVolumeFilterSet( FilterSetType fst, String[] timeStrs ) - { - EC2VolumeFilterSet vfs = new EC2VolumeFilterSet(); - boolean timeFilter = false; - - FilterType[] items = fst.getItem(); - if (null != items) - { - // -> each filter can have one or more values associated with it - for( int j=0; j < items.length; j++ ) - { - EC2Filter oneFilter = new EC2Filter(); - String filterName = items[j].getName(); - oneFilter.setName( filterName ); - - // -> is the filter one of the xsd:dateTime filters? 
- timeFilter = false; - for( int m=0; m < timeStrs.length; m++ ) - { - timeFilter = filterName.equalsIgnoreCase( timeStrs[m] ); - if (timeFilter) break; - } - - ValueSetType vst = items[j].getValueSet(); - ValueType[] valueItems = vst.getItem(); - for( int k=0; k < valueItems.length; k++ ) - { - // -> time values are not encoded as regexes - if ( timeFilter ) - oneFilter.addValue( valueItems[k].getValue()); - else oneFilter.addValueEncoded( valueItems[k].getValue()); - } - vfs.addFilter( oneFilter ); - } - } - return vfs; - } - - - private EC2SnapshotFilterSet toSnapshotFilterSet( FilterSetType fst, String[] timeStrs ) - { - EC2SnapshotFilterSet vfs = new EC2SnapshotFilterSet(); - boolean timeFilter = false; - - FilterType[] items = fst.getItem(); - if (null != items) - { - // -> each filter can have one or more values associated with it - for( int j=0; j < items.length; j++ ) - { - EC2Filter oneFilter = new EC2Filter(); - String filterName = items[j].getName(); - oneFilter.setName( filterName ); - - // -> is the filter one of the xsd:dateTime filters? 
- timeFilter = false; - for( int m=0; m < timeStrs.length; m++ ) - { - timeFilter = filterName.equalsIgnoreCase( timeStrs[m] ); - if (timeFilter) break; - } - - ValueSetType vst = items[j].getValueSet(); - ValueType[] valueItems = vst.getItem(); - for( int k=0; k < valueItems.length; k++ ) - { - // -> time values are not encoded as regexes - if ( timeFilter ) - oneFilter.addValue( valueItems[k].getValue()); - else oneFilter.addValueEncoded( valueItems[k].getValue()); - } - vfs.addFilter( oneFilter ); - } - } - return vfs; - } - - - // TODO make these filter set functions use generics - private EC2GroupFilterSet toGroupFilterSet( FilterSetType fst ) - { - EC2GroupFilterSet gfs = new EC2GroupFilterSet(); - - FilterType[] items = fst.getItem(); - if (null != items) - { - // -> each filter can have one or more values associated with it - for( int j=0; j < items.length; j++ ) - { - EC2Filter oneFilter = new EC2Filter(); - String filterName = items[j].getName(); - oneFilter.setName( filterName ); - - ValueSetType vst = items[j].getValueSet(); - ValueType[] valueItems = vst.getItem(); - for( int k=0; k < valueItems.length; k++ ) - { - oneFilter.addValueEncoded( valueItems[k].getValue()); - } - gfs.addFilter( oneFilter ); - } - } - return gfs; - } - - - private EC2InstanceFilterSet toInstanceFilterSet( FilterSetType fst ) - { - EC2InstanceFilterSet ifs = new EC2InstanceFilterSet(); - - FilterType[] items = fst.getItem(); - if (null != items) - { - // -> each filter can have one or more values associated with it - for( int j=0; j < items.length; j++ ) - { - EC2Filter oneFilter = new EC2Filter(); - String filterName = items[j].getName(); - oneFilter.setName( filterName ); - - ValueSetType vst = items[j].getValueSet(); - ValueType[] valueItems = vst.getItem(); - for( int k=0; k < valueItems.length; k++ ) - { - oneFilter.addValueEncoded( valueItems[k].getValue()); - } - ifs.addFilter( oneFilter ); - } - } - return ifs; - } - - // toMethods - public static 
DescribeVolumesResponse toDescribeVolumesResponse( EC2DescribeVolumesResponse engineResponse ) - { - DescribeVolumesResponse response = new DescribeVolumesResponse(); - DescribeVolumesResponseType param1 = new DescribeVolumesResponseType(); - DescribeVolumesSetResponseType param2 = new DescribeVolumesSetResponseType(); - - EC2Volume[] volumes = engineResponse.getVolumeSet(); - for (EC2Volume vol : volumes) { - DescribeVolumesSetItemResponseType param3 = new DescribeVolumesSetItemResponseType(); - param3.setVolumeId( vol.getId().toString()); - - Long volSize = new Long(vol.getSize()); - param3.setSize(volSize.toString()); - String snapId = vol.getSnapshotId() != null ? vol.getSnapshotId().toString() : ""; - param3.setSnapshotId(snapId); - param3.setAvailabilityZone( vol.getZoneName()); - param3.setStatus( vol.getState()); - - // -> CloudStack seems to have issues with timestamp formats so just in case - Calendar cal = EC2RestAuth.parseDateString(vol.getCreated()); - if (cal == null) { - cal = Calendar.getInstance(); - cal.set( 1970, 1, 1 ); - } - param3.setCreateTime( cal ); - - AttachmentSetResponseType param4 = new AttachmentSetResponseType(); - if (null != vol.getInstanceId()) { - AttachmentSetItemResponseType param5 = new AttachmentSetItemResponseType(); - param5.setVolumeId(vol.getId().toString()); - param5.setInstanceId(vol.getInstanceId().toString()); - String devicePath = engine.cloudDeviceIdToDevicePath( vol.getHypervisor(), vol.getDeviceId()); - param5.setDevice( devicePath ); - param5.setStatus( toVolumeAttachmentState( vol.getInstanceId(), vol.getVMState())); - param5.setAttachTime( cal ); - param5.setDeleteOnTermination( false ); - param4.addItem( param5 ); - } - - param3.setAttachmentSet( param4 ); - - // -> try to generate an empty tag does not seem to work - ResourceTagSetType param6 = new ResourceTagSetType(); - ResourceTagSetItemType param7 = new ResourceTagSetItemType(); - param7.setKey(""); - param7.setValue(""); - param6.addItem( param7 ); - 
param3.setTagSet( param6 ); - param2.addItem( param3 ); - } - param1.setVolumeSet( param2 ); - param1.setRequestId( UUID.randomUUID().toString()); - response.setDescribeVolumesResponse( param1 ); - return response; - } - - - public static DescribeInstanceAttributeResponse toDescribeInstanceAttributeResponse(EC2DescribeInstancesResponse engineResponse) { - DescribeInstanceAttributeResponse response = new DescribeInstanceAttributeResponse(); - DescribeInstanceAttributeResponseType param1 = new DescribeInstanceAttributeResponseType(); - - EC2Instance[] instanceSet = engineResponse.getInstanceSet(); - if (0 < instanceSet.length) { - DescribeInstanceAttributeResponseTypeChoice_type0 param2 = new DescribeInstanceAttributeResponseTypeChoice_type0(); - NullableAttributeValueType param3 = new NullableAttributeValueType(); - param3.setValue( instanceSet[0].getServiceOffering()); - param2.setInstanceType( param3 ); - param1.setDescribeInstanceAttributeResponseTypeChoice_type0( param2 ); - param1.setInstanceId( instanceSet[0].getId()); - } - param1.setRequestId( UUID.randomUUID().toString()); - response.setDescribeInstanceAttributeResponse( param1 ); - return response; - } - - - public static DescribeInstancesResponse toDescribeInstancesResponse(EC2DescribeInstancesResponse engineResponse, EC2Engine engine) - { - DescribeInstancesResponse response = new DescribeInstancesResponse(); - DescribeInstancesResponseType param1 = new DescribeInstancesResponseType(); - ReservationSetType param2 = new ReservationSetType(); - - EC2Instance[] instances = engineResponse.getInstanceSet(); - - for (EC2Instance inst:instances) { - String accountName = inst.getAccountName(); - String domainId = inst.getDomainId(); - String ownerId = domainId + ":" + accountName; - - ReservationInfoType param3 = new ReservationInfoType(); - - param3.setReservationId( inst.getId()); // -> an id we could track down if needed - param3.setOwnerId(ownerId); - param3.setRequesterId( "" ); - - GroupSetType param4 = 
new GroupSetType(); - GroupItemType param5 = new GroupItemType(); - param5.setGroupId( (null == inst.getGroup() ? "" : inst.getGroup())); - param4.addItem( param5 ); - param3.setGroupSet( param4 ); - - RunningInstancesSetType param6 = new RunningInstancesSetType(); - RunningInstancesItemType param7 = new RunningInstancesItemType(); - - param7.setInstanceId( inst.getId()); - param7.setImageId( inst.getTemplateId()); - - InstanceStateType param8 = new InstanceStateType(); - param8.setCode( toAmazonCode( inst.getState())); - param8.setName( toAmazonStateName( inst.getState())); - param7.setInstanceState( param8 ); - - param7.setPrivateDnsName( "" ); - param7.setDnsName( "" ); - param7.setReason( "" ); - param7.setKeyName( "" ); - param7.setAmiLaunchIndex( "" ); - param7.setInstanceType( inst.getServiceOffering()); - - ProductCodesSetType param9 = new ProductCodesSetType(); - ProductCodesSetItemType param10 = new ProductCodesSetItemType(); - param10.setProductCode( "" ); - param9.addItem( param10 ); - param7.setProductCodes( param9 ); - - Calendar cal = inst.getCreated(); - if ( null == cal ) { - cal = Calendar.getInstance(); -// cal.set( 1970, 1, 1 ); - } - param7.setLaunchTime( cal ); - - PlacementResponseType param11 = new PlacementResponseType(); - param11.setAvailabilityZone( inst.getZoneName()); - param11.setGroupName( "" ); - param7.setPlacement( param11 ); - param7.setKernelId( "" ); - param7.setRamdiskId( "" ); - param7.setPlatform( "" ); - - InstanceMonitoringStateType param12 = new InstanceMonitoringStateType(); - param12.setState( "" ); - param7.setMonitoring( param12 ); - param7.setSubnetId( "" ); - param7.setVpcId( "" ); -// String ipAddr = inst.getPrivateIpAddress(); -// param7.setPrivateIpAddress((null != ipAddr ? 
ipAddr : "")); - param7.setPrivateIpAddress(inst.getIpAddress()); - param7.setIpAddress( inst.getIpAddress()); - - StateReasonType param13 = new StateReasonType(); - param13.setCode( "" ); - param13.setMessage( "" ); - param7.setStateReason( param13 ); - param7.setArchitecture( "" ); - param7.setRootDeviceType( "" ); - String devicePath = engine.cloudDeviceIdToDevicePath( inst.getHypervisor(), inst.getRootDeviceId()); - param7.setRootDeviceName( devicePath ); - - InstanceBlockDeviceMappingResponseType param14 = new InstanceBlockDeviceMappingResponseType(); - InstanceBlockDeviceMappingResponseItemType param15 = new InstanceBlockDeviceMappingResponseItemType(); - InstanceBlockDeviceMappingResponseItemTypeChoice_type0 param16 = new InstanceBlockDeviceMappingResponseItemTypeChoice_type0(); - param15.setDeviceName( "" ); - EbsInstanceBlockDeviceMappingResponseType param17 = new EbsInstanceBlockDeviceMappingResponseType(); - param17.setVolumeId( "" ); - param17.setStatus( "" ); - param17.setAttachTime( cal ); - - param17.setDeleteOnTermination( true ); - param16.setEbs( param17 ); - param15.setInstanceBlockDeviceMappingResponseItemTypeChoice_type0( param16 ); - param14.addItem( param15 ); - param7.setBlockDeviceMapping( param14 ); - param7.setInstanceLifecycle( "" ); - param7.setSpotInstanceRequestId( "" ); - - param6.addItem( param7 ); - param3.setInstancesSet( param6 ); - param2.addItem( param3 ); - } - param1.setReservationSet( param2 ); - param1.setRequestId( UUID.randomUUID().toString()); - response.setDescribeInstancesResponse( param1 ); - return response; - } - - - public static DescribeAddressesResponse toDescribeAddressesResponse(EC2DescribeAddressesResponse engineResponse) { - List items = new ArrayList(); - EC2Address[] addressSet = engineResponse.getAddressSet(); - - for (EC2Address addr: addressSet) { - DescribeAddressesResponseItemType item = new DescribeAddressesResponseItemType(); - item.setPublicIp(addr.getIpAddress()); - 
item.setInstanceId(addr.getAssociatedInstanceId()); - items.add(item); - } - DescribeAddressesResponseInfoType descAddrRespInfoType = new DescribeAddressesResponseInfoType(); - descAddrRespInfoType.setItem(items.toArray(new DescribeAddressesResponseItemType[0])); - - DescribeAddressesResponseType descAddrRespType = new DescribeAddressesResponseType(); - descAddrRespType.setRequestId(UUID.randomUUID().toString()); - descAddrRespType.setAddressesSet(descAddrRespInfoType); - - DescribeAddressesResponse descAddrResp = new DescribeAddressesResponse(); - descAddrResp.setDescribeAddressesResponse(descAddrRespType); - - return descAddrResp; - } - - public static AllocateAddressResponse toAllocateAddressResponse(final EC2Address ec2Address) { - AllocateAddressResponse response = new AllocateAddressResponse(); - AllocateAddressResponseType param1 = new AllocateAddressResponseType(); - - param1.setPublicIp(ec2Address.getIpAddress()); - param1.setRequestId(UUID.randomUUID().toString()); - response.setAllocateAddressResponse(param1); - return response; - } - - public static ReleaseAddressResponse toReleaseAddressResponse(final boolean result) { - ReleaseAddressResponse response = new ReleaseAddressResponse(); - ReleaseAddressResponseType param1 = new ReleaseAddressResponseType(); - - param1.set_return(result); - param1.setRequestId(UUID.randomUUID().toString()); - - response.setReleaseAddressResponse(param1); - return response; - } - - public static AssociateAddressResponse toAssociateAddressResponse(final boolean result) { - AssociateAddressResponse response = new AssociateAddressResponse(); - AssociateAddressResponseType param1 = new AssociateAddressResponseType(); - - param1.setRequestId(UUID.randomUUID().toString()); - param1.set_return(result); - - response.setAssociateAddressResponse(param1); - return response; - } - - public static DisassociateAddressResponse toDisassociateAddressResponse(final boolean result) { - DisassociateAddressResponse response = new 
DisassociateAddressResponse(); - DisassociateAddressResponseType param1 = new DisassociateAddressResponseType(); - - param1.setRequestId(UUID.randomUUID().toString()); - param1.set_return(result); - - response.setDisassociateAddressResponse(param1); - return response; - } - - /** - * Map our cloud state values into what Amazon defines. - * Where are the values that can be returned by our cloud api defined? - * - * @param cloudState - * @return - */ - public static int toAmazonCode( String cloudState ) - { - if (null == cloudState) return 48; - - if (cloudState.equalsIgnoreCase( "Destroyed" )) return 48; - else if (cloudState.equalsIgnoreCase( "Stopped" )) return 80; - else if (cloudState.equalsIgnoreCase( "Running" )) return 16; - else if (cloudState.equalsIgnoreCase( "Starting" )) return 0; - else if (cloudState.equalsIgnoreCase( "Stopping" )) return 64; - else if (cloudState.equalsIgnoreCase( "Error" )) return 1; - else if (cloudState.equalsIgnoreCase( "Expunging" )) return 48; - else return 16; - } - - public static String toAmazonStateName( String cloudState ) - { - if (null == cloudState) return new String( "terminated" ); - - if (cloudState.equalsIgnoreCase( "Destroyed" )) return new String( "terminated" ); - else if (cloudState.equalsIgnoreCase( "Stopped" )) return new String( "stopped" ); - else if (cloudState.equalsIgnoreCase( "Running" )) return new String( "running" ); - else if (cloudState.equalsIgnoreCase( "Starting" )) return new String( "pending" ); - else if (cloudState.equalsIgnoreCase( "Stopping" )) return new String( "stopping" ); - else if (cloudState.equalsIgnoreCase( "Error" )) return new String( "error" ); - else if (cloudState.equalsIgnoreCase( "Expunging" )) return new String( "terminated"); - else return new String( "running" ); - } - - /** - * We assume a state for the volume based on what its associated VM is doing. 
- * - * @param vmId - * @param vmState - * @return - */ - public static String toVolumeAttachmentState(String instanceId, String vmState ) { - if (null == instanceId || null == vmState) return "detached"; - - if (vmState.equalsIgnoreCase( "Destroyed" )) return "detached"; - else if (vmState.equalsIgnoreCase( "Stopped" )) return "attached"; - else if (vmState.equalsIgnoreCase( "Running" )) return "attached"; - else if (vmState.equalsIgnoreCase( "Starting" )) return "attaching"; - else if (vmState.equalsIgnoreCase( "Stopping" )) return "attached"; - else if (vmState.equalsIgnoreCase( "Error" )) return "detached"; - else return "detached"; - } - - public static StopInstancesResponse toStopInstancesResponse(EC2StopInstancesResponse engineResponse) { - StopInstancesResponse response = new StopInstancesResponse(); - StopInstancesResponseType param1 = new StopInstancesResponseType(); - InstanceStateChangeSetType param2 = new InstanceStateChangeSetType(); - - EC2Instance[] instances = engineResponse.getInstanceSet(); - for( int i=0; i < instances.length; i++ ) { - InstanceStateChangeType param3 = new InstanceStateChangeType(); - param3.setInstanceId( instances[i].getId()); - - InstanceStateType param4 = new InstanceStateType(); - param4.setCode( toAmazonCode( instances[i].getState())); - param4.setName( toAmazonStateName( instances[i].getState())); - param3.setCurrentState( param4 ); - - InstanceStateType param5 = new InstanceStateType(); - param5.setCode( toAmazonCode( instances[i].getPreviousState() )); - param5.setName( toAmazonStateName( instances[i].getPreviousState() )); - param3.setPreviousState( param5 ); - - param2.addItem( param3 ); - } - - param1.setRequestId( UUID.randomUUID().toString()); - param1.setInstancesSet( param2 ); - response.setStopInstancesResponse( param1 ); - return response; - } - - public static StartInstancesResponse toStartInstancesResponse(EC2StartInstancesResponse engineResponse) { - StartInstancesResponse response = new 
StartInstancesResponse(); - StartInstancesResponseType param1 = new StartInstancesResponseType(); - InstanceStateChangeSetType param2 = new InstanceStateChangeSetType(); - - EC2Instance[] instances = engineResponse.getInstanceSet(); - for( int i=0; i < instances.length; i++ ) { - InstanceStateChangeType param3 = new InstanceStateChangeType(); - param3.setInstanceId( instances[i].getId()); - - InstanceStateType param4 = new InstanceStateType(); - param4.setCode( toAmazonCode( instances[i].getState())); - param4.setName( toAmazonStateName( instances[i].getState())); - param3.setCurrentState( param4 ); - - InstanceStateType param5 = new InstanceStateType(); - param5.setCode( toAmazonCode( instances[i].getPreviousState() )); - param5.setName( toAmazonStateName( instances[i].getPreviousState() )); - param3.setPreviousState( param5 ); - - param2.addItem( param3 ); - } - - param1.setRequestId( UUID.randomUUID().toString()); - param1.setInstancesSet( param2 ); - response.setStartInstancesResponse( param1 ); - return response; - } - - public static TerminateInstancesResponse toTermInstancesResponse(EC2StopInstancesResponse engineResponse) { - TerminateInstancesResponse response = new TerminateInstancesResponse(); - TerminateInstancesResponseType param1 = new TerminateInstancesResponseType(); - InstanceStateChangeSetType param2 = new InstanceStateChangeSetType(); - - EC2Instance[] instances = engineResponse.getInstanceSet(); - for( int i=0; i < instances.length; i++ ) { - InstanceStateChangeType param3 = new InstanceStateChangeType(); - param3.setInstanceId( instances[i].getId()); - - InstanceStateType param4 = new InstanceStateType(); - param4.setCode( toAmazonCode( instances[i].getState())); - param4.setName( toAmazonStateName( instances[i].getState())); - param3.setCurrentState( param4 ); - - InstanceStateType param5 = new InstanceStateType(); - param5.setCode( toAmazonCode( instances[i].getPreviousState() )); - param5.setName( toAmazonStateName( 
instances[i].getPreviousState() )); - param3.setPreviousState( param5 ); - - param2.addItem( param3 ); - } - - param1.setRequestId( UUID.randomUUID().toString()); - param1.setInstancesSet( param2 ); - response.setTerminateInstancesResponse( param1 ); - return response; - } - - public static RebootInstancesResponse toRebootInstancesResponse(boolean engineResponse) { - RebootInstancesResponse response = new RebootInstancesResponse(); - RebootInstancesResponseType param1 = new RebootInstancesResponseType(); - - param1.setRequestId( UUID.randomUUID().toString()); - param1.set_return( engineResponse ); - response.setRebootInstancesResponse( param1 ); - return response; - } - - public static RunInstancesResponse toRunInstancesResponse(EC2RunInstancesResponse engineResponse, EC2Engine engine ) { - RunInstancesResponse response = new RunInstancesResponse(); - RunInstancesResponseType param1 = new RunInstancesResponseType(); - - param1.setReservationId( "" ); - - GroupSetType param2 = new GroupSetType(); - GroupItemType param3 = new GroupItemType(); - param3.setGroupId( "" ); - param2.addItem( param3 ); - param1.setGroupSet( param2 ); - - RunningInstancesSetType param6 = new RunningInstancesSetType(); - EC2Instance[] instances = engineResponse.getInstanceSet(); - for (EC2Instance inst : instances) { - RunningInstancesItemType param7 = new RunningInstancesItemType(); - param7.setInstanceId( inst.getId()); - param7.setImageId( inst.getTemplateId()); - - String accountName = inst.getAccountName(); - String domainId = inst.getDomainId(); - String ownerId = domainId + ":" + accountName; - - param1.setOwnerId(ownerId); - - InstanceStateType param8 = new InstanceStateType(); - param8.setCode( toAmazonCode( inst.getState())); - param8.setName( toAmazonStateName( inst.getState())); - param7.setInstanceState( param8 ); - - param7.setPrivateDnsName( "" ); - param7.setDnsName( "" ); - param7.setReason( "" ); - param7.setKeyName( "" ); - param7.setAmiLaunchIndex( "" ); - - 
ProductCodesSetType param9 = new ProductCodesSetType(); - ProductCodesSetItemType param10 = new ProductCodesSetItemType(); - param10.setProductCode( "" ); - param9.addItem( param10 ); - param7.setProductCodes( param9 ); - - param7.setInstanceType( inst.getServiceOffering()); - // -> CloudStack seems to have issues with timestamp formats so just in case - Calendar cal = inst.getCreated(); - if ( null == cal ) { - cal = Calendar.getInstance(); - cal.set( 1970, 1, 1 ); - } - param7.setLaunchTime( cal ); - - PlacementResponseType param11 = new PlacementResponseType(); - param11.setAvailabilityZone( inst.getZoneName()); - param7.setPlacement( param11 ); - - param7.setKernelId( "" ); - param7.setRamdiskId( "" ); - param7.setPlatform( "" ); - - InstanceMonitoringStateType param12 = new InstanceMonitoringStateType(); - param12.setState( "" ); - param7.setMonitoring( param12 ); - param7.setSubnetId( "" ); - param7.setVpcId( "" ); - String ipAddr = inst.getPrivateIpAddress(); - param7.setPrivateIpAddress((null != ipAddr ? 
ipAddr : "")); - param7.setIpAddress( inst.getIpAddress()); - - StateReasonType param13 = new StateReasonType(); - param13.setCode( "" ); - param13.setMessage( "" ); - param7.setStateReason( param13 ); - param7.setArchitecture( "" ); - param7.setRootDeviceType( "" ); - param7.setRootDeviceName( "" ); - - InstanceBlockDeviceMappingResponseType param14 = new InstanceBlockDeviceMappingResponseType(); - InstanceBlockDeviceMappingResponseItemType param15 = new InstanceBlockDeviceMappingResponseItemType(); - InstanceBlockDeviceMappingResponseItemTypeChoice_type0 param16 = new InstanceBlockDeviceMappingResponseItemTypeChoice_type0(); - param15.setDeviceName( "" ); - EbsInstanceBlockDeviceMappingResponseType param17 = new EbsInstanceBlockDeviceMappingResponseType(); - param17.setVolumeId( "" ); - param17.setStatus( "" ); - param17.setAttachTime( cal ); - param17.setDeleteOnTermination( true ); - param16.setEbs( param17 ); - param15.setInstanceBlockDeviceMappingResponseItemTypeChoice_type0( param16 ); - param14.addItem( param15 ); - param7.setBlockDeviceMapping( param14 ); - - param7.setInstanceLifecycle( "" ); - param7.setSpotInstanceRequestId( "" ); - param7.setVirtualizationType( "" ); - param7.setClientToken( "" ); - - ResourceTagSetType param18 = new ResourceTagSetType(); - ResourceTagSetItemType param19 = new ResourceTagSetItemType(); - param19.setKey(""); - param19.setValue(""); - param18.addItem( param19 ); - param7.setTagSet( param18 ); - - String hypervisor = inst.getHypervisor(); - param7.setHypervisor((null != hypervisor ? 
hypervisor : "")); - param6.addItem( param7 ); - } - param1.setInstancesSet( param6 ); - param1.setRequesterId( "" ); - - param1.setRequestId( UUID.randomUUID().toString()); - response.setRunInstancesResponse( param1 ); - return response; - } - - public static DescribeAvailabilityZonesResponse toDescribeAvailabilityZonesResponse(EC2DescribeAvailabilityZonesResponse engineResponse) { - DescribeAvailabilityZonesResponse response = new DescribeAvailabilityZonesResponse(); - DescribeAvailabilityZonesResponseType param1 = new DescribeAvailabilityZonesResponseType(); - AvailabilityZoneSetType param2 = new AvailabilityZoneSetType(); - - String[] zones = engineResponse.getZoneSet(); - for (String zone : zones) { - AvailabilityZoneItemType param3 = new AvailabilityZoneItemType(); - AvailabilityZoneMessageSetType param4 = new AvailabilityZoneMessageSetType(); - param3.setZoneName( zone ); - param3.setZoneState( "available" ); - param3.setRegionName( "" ); - param3.setMessageSet( param4 ); - param2.addItem( param3 ); - } - - param1.setRequestId( UUID.randomUUID().toString()); - param1.setAvailabilityZoneInfo( param2 ); - response.setDescribeAvailabilityZonesResponse( param1 ); - return response; - } - - public static AttachVolumeResponse toAttachVolumeResponse(EC2Volume engineResponse) { - AttachVolumeResponse response = new AttachVolumeResponse(); - AttachVolumeResponseType param1 = new AttachVolumeResponseType(); - - Calendar cal = Calendar.getInstance(); - - // -> if the instanceId was not given in the request then we have no way to get it - param1.setVolumeId( engineResponse.getId().toString()); - param1.setInstanceId( engineResponse.getInstanceId().toString()); - param1.setDevice( engineResponse.getDevice()); - if ( null != engineResponse.getState()) - param1.setStatus( engineResponse.getState()); - else param1.setStatus( "" ); // ToDo - throw an Soap Fault - - param1.setAttachTime( cal ); - - param1.setRequestId( UUID.randomUUID().toString()); - 
response.setAttachVolumeResponse( param1 ); - return response; - } - - public static DetachVolumeResponse toDetachVolumeResponse(EC2Volume engineResponse) { - DetachVolumeResponse response = new DetachVolumeResponse(); - DetachVolumeResponseType param1 = new DetachVolumeResponseType(); - Calendar cal = Calendar.getInstance(); - cal.set( 1970, 1, 1 ); // return one value, Unix Epoch, what else can we return? - - param1.setVolumeId( engineResponse.getId().toString()); - param1.setInstanceId( (null == engineResponse.getInstanceId() ? "" : engineResponse.getInstanceId().toString())); - param1.setDevice( (null == engineResponse.getDevice() ? "" : engineResponse.getDevice())); - if ( null != engineResponse.getState()) - param1.setStatus( engineResponse.getState()); - else param1.setStatus( "" ); // ToDo - throw an Soap Fault - - param1.setAttachTime( cal ); - - param1.setRequestId( UUID.randomUUID().toString()); - response.setDetachVolumeResponse( param1 ); - return response; - } - - public static CreateVolumeResponse toCreateVolumeResponse(EC2Volume engineResponse) { - CreateVolumeResponse response = new CreateVolumeResponse(); - CreateVolumeResponseType param1 = new CreateVolumeResponseType(); - - param1.setVolumeId( engineResponse.getId().toString()); - Long volSize = new Long( engineResponse.getSize()); - param1.setSize( volSize.toString()); - param1.setSnapshotId( "" ); - param1.setAvailabilityZone( engineResponse.getZoneName()); - if ( null != engineResponse.getState()) - param1.setStatus( engineResponse.getState()); - else param1.setStatus( "" ); // ToDo - throw an Soap Fault - - // -> CloudStack seems to have issues with timestamp formats so just in case - Calendar cal = EC2RestAuth.parseDateString(engineResponse.getCreated()); - if ( null == cal ) { - cal = Calendar.getInstance(); -// cal.set( 1970, 1, 1 ); - } - param1.setCreateTime( cal ); - - param1.setRequestId( UUID.randomUUID().toString()); - response.setCreateVolumeResponse( param1 ); - return response; - 
} - - public static DeleteVolumeResponse toDeleteVolumeResponse(EC2Volume engineResponse) { - DeleteVolumeResponse response = new DeleteVolumeResponse(); - DeleteVolumeResponseType param1 = new DeleteVolumeResponseType(); - - if ( null != engineResponse.getState()) - param1.set_return( true ); - else param1.set_return( false ); // ToDo - supposed to return an error - - param1.setRequestId( UUID.randomUUID().toString()); - response.setDeleteVolumeResponse( param1 ); - return response; - } - - public static DescribeSnapshotsResponse toDescribeSnapshotsResponse(EC2DescribeSnapshotsResponse engineResponse) { - DescribeSnapshotsResponse response = new DescribeSnapshotsResponse(); - DescribeSnapshotsResponseType param1 = new DescribeSnapshotsResponseType(); - DescribeSnapshotsSetResponseType param2 = new DescribeSnapshotsSetResponseType(); - - EC2Snapshot[] snaps = engineResponse.getSnapshotSet(); - for (EC2Snapshot snap : snaps) { - DescribeSnapshotsSetItemResponseType param3 = new DescribeSnapshotsSetItemResponseType(); -<<<<<<< HEAD - param3.setSnapshotId( snap.getId()); - param3.setVolumeId( snap.getVolumeId()); - - // our semantics are different than those ec2 uses - if (snap.getState().equalsIgnoreCase("backedup")) { - param3.setStatus("completed"); - param3.setProgress("100%"); - } else if (snap.getState().equalsIgnoreCase("creating")) { - param3.setStatus("pending"); - param3.setProgress("33%"); - } else if (snap.getState().equalsIgnoreCase("backingup")) { - param3.setStatus("pending"); - param3.setProgress("66%"); - } else { - // if we see anything besides: backedup/creating/backingup, we assume error - param3.setStatus("error"); - param3.setProgress("0%"); - } -// param3.setStatus( snap.getState()); - - String ownerId = snap.getDomainId() + ":" + snap.getAccountName(); -======= - param3.setSnapshotId( snap.getId().toString()); - param3.setVolumeId( snap.getVolumeId().toString()); - param3.setStatus( snap.getState()); - - String accountName = 
snap.getAccountName(); - String domainId = snap.getDomainId().toString(); - String ownerId = domainId + ":" + accountName; ->>>>>>> 6472e7b... Now really adding the renamed files! - - // -> CloudStack seems to have issues with timestamp formats so just in case - Calendar cal = snap.getCreated(); - if ( null == cal ) { - cal = Calendar.getInstance(); - cal.set( 1970, 1, 1 ); - } - param3.setStartTime( cal ); - -<<<<<<< HEAD - param3.setOwnerId(ownerId); - param3.setVolumeSize( snap.getVolumeSize().toString()); - param3.setDescription( snap.getName()); - param3.setOwnerAlias( snap.getAccountName() ); -======= - param3.setProgress( "" ); - param3.setOwnerId(ownerId); - Long volSize = new Long( snap.getVolumeSize()); - param3.setVolumeSize( volSize.toString()); - param3.setDescription( snap.getName()); - param3.setOwnerAlias( "" ); ->>>>>>> 6472e7b... Now really adding the renamed files! - - ResourceTagSetType param18 = new ResourceTagSetType(); - ResourceTagSetItemType param19 = new ResourceTagSetItemType(); - param19.setKey(""); - param19.setValue(""); - param18.addItem( param19 ); - param3.setTagSet( param18 ); - param2.addItem( param3 ); - } - - param1.setSnapshotSet( param2 ); - param1.setRequestId( UUID.randomUUID().toString()); - response.setDescribeSnapshotsResponse( param1 ); - return response; - } - - public static DeleteSnapshotResponse toDeleteSnapshotResponse( boolean engineResponse ) { - DeleteSnapshotResponse response = new DeleteSnapshotResponse(); - DeleteSnapshotResponseType param1 = new DeleteSnapshotResponseType(); - - param1.set_return( engineResponse ); - param1.setRequestId( UUID.randomUUID().toString()); - response.setDeleteSnapshotResponse( param1 ); - return response; - } - - public static CreateSnapshotResponse toCreateSnapshotResponse(EC2Snapshot engineResponse, EC2Engine engine ) { - CreateSnapshotResponse response = new CreateSnapshotResponse(); - CreateSnapshotResponseType param1 = new CreateSnapshotResponseType(); - - String accountName 
= engineResponse.getAccountName(); - String domainId = engineResponse.getDomainId().toString(); - String ownerId = domainId + ":" + accountName; - - param1.setSnapshotId( engineResponse.getId().toString()); - param1.setVolumeId( engineResponse.getVolumeId().toString()); - param1.setStatus( "completed" ); - - // -> CloudStack seems to have issues with timestamp formats so just in case - Calendar cal = engineResponse.getCreated(); - if ( null == cal ) { - cal = Calendar.getInstance(); - cal.set( 1970, 1, 1 ); - } - param1.setStartTime( cal ); - - param1.setProgress( "100" ); - param1.setOwnerId(ownerId); - Long volSize = new Long( engineResponse.getVolumeSize()); - param1.setVolumeSize( volSize.toString()); - param1.setDescription( engineResponse.getName()); - param1.setRequestId( UUID.randomUUID().toString()); - response.setCreateSnapshotResponse( param1 ); - return response; - } - - public static DescribeSecurityGroupsResponse toDescribeSecurityGroupsResponse( - EC2DescribeSecurityGroupsResponse engineResponse) { - DescribeSecurityGroupsResponse response = new DescribeSecurityGroupsResponse(); - DescribeSecurityGroupsResponseType param1 = new DescribeSecurityGroupsResponseType(); - SecurityGroupSetType param2 = new SecurityGroupSetType(); - - EC2SecurityGroup[] groups = engineResponse.getGroupSet(); - for (EC2SecurityGroup group : groups) { - SecurityGroupItemType param3 = new SecurityGroupItemType(); - String accountName = group.getAccountName(); - String domainId = group.getDomainId(); - String ownerId = domainId + ":" + accountName; - - param3.setOwnerId(ownerId); - param3.setGroupName(group.getName()); - String desc = group.getDescription(); - param3.setGroupDescription((null != desc ? desc : "")); - - IpPermissionSetType param4 = new IpPermissionSetType(); - EC2IpPermission[] perms = group.getIpPermissionSet(); - for (EC2IpPermission perm : perms) { - // TODO: Fix kludges like this... 
- if (perm == null) - continue; - IpPermissionType param5 = new IpPermissionType(); - param5.setIpProtocol(perm.getProtocol()); - param5.setFromPort(perm.getFromPort()); - param5.setToPort(perm.getToPort()); - - // -> user groups - EC2SecurityGroup[] userSet = perm.getUserSet(); - if (null == userSet || 0 == userSet.length) { - UserIdGroupPairSetType param8 = new UserIdGroupPairSetType(); - param5.setGroups(param8); - } else { - for (EC2SecurityGroup secGroup : userSet) { - UserIdGroupPairSetType param8 = new UserIdGroupPairSetType(); - UserIdGroupPairType param9 = new UserIdGroupPairType(); - param9.setUserId(secGroup.getAccount()); - param9.setGroupName(secGroup.getName()); - param8.addItem(param9); - param5.setGroups(param8); - } - } - - // -> or CIDR list - String[] rangeSet = perm.getIpRangeSet(); - if (null == rangeSet || 0 == rangeSet.length) { - IpRangeSetType param6 = new IpRangeSetType(); - param5.setIpRanges(param6); - } else { - for (String range : rangeSet) { - // TODO: This needs further attention... 
- if (range == null) { - range = ""; - } - IpRangeSetType param6 = new IpRangeSetType(); - IpRangeItemType param7 = new IpRangeItemType(); - param7.setCidrIp(range); - param6.addItem(param7); - param5.setIpRanges(param6); - } - } - param4.addItem(param5); - } - param3.setIpPermissions(param4); - param2.addItem(param3); - } - param1.setSecurityGroupInfo(param2); - param1.setRequestId(UUID.randomUUID().toString()); - response.setDescribeSecurityGroupsResponse(param1); - return response; - } - - public static CreateSecurityGroupResponse toCreateSecurityGroupResponse( boolean success ) { - CreateSecurityGroupResponse response = new CreateSecurityGroupResponse(); - CreateSecurityGroupResponseType param1 = new CreateSecurityGroupResponseType(); - - param1.set_return(success); - param1.setRequestId( UUID.randomUUID().toString()); - response.setCreateSecurityGroupResponse( param1 ); - return response; - } - - public static DeleteSecurityGroupResponse toDeleteSecurityGroupResponse( boolean success ) { - DeleteSecurityGroupResponse response = new DeleteSecurityGroupResponse(); - DeleteSecurityGroupResponseType param1 = new DeleteSecurityGroupResponseType(); - - param1.set_return( success ); - param1.setRequestId( UUID.randomUUID().toString()); - response.setDeleteSecurityGroupResponse( param1 ); - return response; - } - - public static AuthorizeSecurityGroupIngressResponse toAuthorizeSecurityGroupIngressResponse( boolean success ) { - AuthorizeSecurityGroupIngressResponse response = new AuthorizeSecurityGroupIngressResponse(); - AuthorizeSecurityGroupIngressResponseType param1 = new AuthorizeSecurityGroupIngressResponseType(); - - param1.set_return( success ); - param1.setRequestId( UUID.randomUUID().toString()); - response.setAuthorizeSecurityGroupIngressResponse( param1 ); - return response; - } - - public static RevokeSecurityGroupIngressResponse toRevokeSecurityGroupIngressResponse( boolean success ) { - RevokeSecurityGroupIngressResponse response = new 
RevokeSecurityGroupIngressResponse(); - RevokeSecurityGroupIngressResponseType param1 = new RevokeSecurityGroupIngressResponseType(); - - param1.set_return( success ); - param1.setRequestId( UUID.randomUUID().toString()); - response.setRevokeSecurityGroupIngressResponse( param1 ); - return response; - } - - public DescribeKeyPairsResponse describeKeyPairs(DescribeKeyPairs describeKeyPairs) { - - EC2DescribeKeyPairs ec2Request = new EC2DescribeKeyPairs(); - - // multiple keynames may be provided - DescribeKeyPairsInfoType kset = describeKeyPairs.getDescribeKeyPairs().getKeySet(); - if (kset != null) { - DescribeKeyPairsItemType[] keyPairKeys = kset.getItem(); - if (keyPairKeys != null) { - for (DescribeKeyPairsItemType key : keyPairKeys) { - ec2Request.addKeyName(key.getKeyName()); - } - } - } - - // multiple filters may be provided - FilterSetType fset = describeKeyPairs.getDescribeKeyPairs().getFilterSet(); - if (fset != null) { - ec2Request.setKeyFilterSet(toKeyPairFilterSet(fset)); - } - - return toDescribeKeyPairs(engine.describeKeyPairs(ec2Request)); - } - - public static DescribeKeyPairsResponse toDescribeKeyPairs(final EC2DescribeKeyPairsResponse response) { - EC2SSHKeyPair[] keyPairs = response.getKeyPairSet(); - - DescribeKeyPairsResponseInfoType respInfoType = new DescribeKeyPairsResponseInfoType(); - if (keyPairs != null && keyPairs.length > 0) { - for (final EC2SSHKeyPair key : keyPairs) { - DescribeKeyPairsResponseItemType respItemType = new DescribeKeyPairsResponseItemType(); - respItemType.setKeyFingerprint(key.getFingerprint()); - respItemType.setKeyName(key.getKeyName()); - respInfoType.addItem(respItemType); - } - } - - DescribeKeyPairsResponseType respType = new DescribeKeyPairsResponseType(); - respType.setRequestId(UUID.randomUUID().toString()); - respType.setKeySet(respInfoType); - - DescribeKeyPairsResponse resp = new DescribeKeyPairsResponse(); - resp.setDescribeKeyPairsResponse(respType); - return resp; - } - - public ImportKeyPairResponse 
importKeyPair(ImportKeyPair importKeyPair) { - String publicKey = importKeyPair.getImportKeyPair().getPublicKeyMaterial(); - if (!publicKey.contains(" ")) - publicKey = new String(Base64.decodeBase64(publicKey.getBytes())); - - EC2ImportKeyPair ec2Request = new EC2ImportKeyPair(); - if (ec2Request != null) { - ec2Request.setKeyName(importKeyPair.getImportKeyPair().getKeyName()); - ec2Request.setPublicKeyMaterial(publicKey); - } - - return toImportKeyPair(engine.importKeyPair(ec2Request)); - } - - public static ImportKeyPairResponse toImportKeyPair(final EC2SSHKeyPair key) { - ImportKeyPairResponseType respType = new ImportKeyPairResponseType(); - respType.setRequestId(UUID.randomUUID().toString()); - respType.setKeyName(key.getKeyName()); - respType.setKeyFingerprint(key.getFingerprint()); - - ImportKeyPairResponse response = new ImportKeyPairResponse(); - response.setImportKeyPairResponse(respType); - - return response; - } - - public CreateKeyPairResponse createKeyPair(CreateKeyPair createKeyPair) { - EC2CreateKeyPair ec2Request = new EC2CreateKeyPair(); - if (ec2Request != null) { - ec2Request.setKeyName(createKeyPair.getCreateKeyPair().getKeyName()); - } - - return toCreateKeyPair(engine.createKeyPair( ec2Request )); - } - - public static CreateKeyPairResponse toCreateKeyPair(final EC2SSHKeyPair key) { - CreateKeyPairResponseType respType = new CreateKeyPairResponseType(); - respType.setRequestId(UUID.randomUUID().toString()); - respType.setKeyName(key.getKeyName()); - respType.setKeyFingerprint(key.getFingerprint()); - respType.setKeyMaterial(key.getPrivateKey()); - - CreateKeyPairResponse response = new CreateKeyPairResponse(); - response.setCreateKeyPairResponse(respType); - - return response; - } - - public DeleteKeyPairResponse deleteKeyPair(DeleteKeyPair deleteKeyPair) { - EC2DeleteKeyPair ec2Request = new EC2DeleteKeyPair(); - ec2Request.setKeyName(deleteKeyPair.getDeleteKeyPair().getKeyName()); - - return 
toDeleteKeyPair(engine.deleteKeyPair(ec2Request)); - } - - public static DeleteKeyPairResponse toDeleteKeyPair(final boolean success) { - DeleteKeyPairResponseType respType = new DeleteKeyPairResponseType(); - respType.setRequestId(UUID.randomUUID().toString()); - respType.set_return(success); - - DeleteKeyPairResponse response = new DeleteKeyPairResponse(); - response.setDeleteKeyPairResponse(respType); - - return response; - } - - public GetPasswordDataResponse getPasswordData(GetPasswordData getPasswordData) { - return toGetPasswordData(engine.getPasswordData(getPasswordData.getGetPasswordData().getInstanceId())); - } - - @SuppressWarnings("serial") - public static GetPasswordDataResponse toGetPasswordData(final EC2PasswordData passwdData) { - return new GetPasswordDataResponse() {{ - setGetPasswordDataResponse(new GetPasswordDataResponseType() {{ - setRequestId(UUID.randomUUID().toString()); - setTimestamp(Calendar.getInstance()); - setPasswordData(passwdData.getEncryptedPassword()); - setInstanceId(passwdData.getInstanceId()); - }}); - }}; - } - - - - - // Actions not yet implemented: - - public ActivateLicenseResponse activateLicense(ActivateLicense activateLicense) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public AssociateDhcpOptionsResponse associateDhcpOptions(AssociateDhcpOptions associateDhcpOptions) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - }; - - public AttachVpnGatewayResponse attachVpnGateway(AttachVpnGateway attachVpnGateway) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public BundleInstanceResponse bundleInstance(BundleInstance bundleInstance) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public CancelBundleTaskResponse cancelBundleTask(CancelBundleTask cancelBundleTask) { - throw new 
EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public CancelConversionTaskResponse cancelConversionTask(CancelConversionTask cancelConversionTask) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public CancelSpotInstanceRequestsResponse cancelSpotInstanceRequests(CancelSpotInstanceRequests cancelSpotInstanceRequests) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public ConfirmProductInstanceResponse confirmProductInstance(ConfirmProductInstance confirmProductInstance) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public CreateCustomerGatewayResponse createCustomerGateway(CreateCustomerGateway createCustomerGateway) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public CreateDhcpOptionsResponse createDhcpOptions(CreateDhcpOptions createDhcpOptions) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public CreatePlacementGroupResponse createPlacementGroup(CreatePlacementGroup createPlacementGroup) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public CreateSpotDatafeedSubscriptionResponse createSpotDatafeedSubscription(CreateSpotDatafeedSubscription createSpotDatafeedSubscription) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public CreateSubnetResponse createSubnet(CreateSubnet createSubnet) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public CreateTagsResponse createTags(CreateTags createTags) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public CreateVpcResponse createVpc(CreateVpc createVpc) { - throw new 
EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public CreateVpnConnectionResponse createVpnConnection(CreateVpnConnection createVpnConnection) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public CreateVpnGatewayResponse createVpnGateway(CreateVpnGateway createVpnGateway) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public DeactivateLicenseResponse deactivateLicense(DeactivateLicense deactivateLicense) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public DeleteCustomerGatewayResponse deleteCustomerGateway(DeleteCustomerGateway deleteCustomerGateway) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public DeleteDhcpOptionsResponse deleteDhcpOptions(DeleteDhcpOptions deleteDhcpOptions) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public DeletePlacementGroupResponse deletePlacementGroup(DeletePlacementGroup deletePlacementGroup) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public DeleteSpotDatafeedSubscriptionResponse deleteSpotDatafeedSubscription(DeleteSpotDatafeedSubscription deleteSpotDatafeedSubscription) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public DeleteSubnetResponse deleteSubnet(DeleteSubnet deleteSubnet) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public DeleteTagsResponse deleteTags(DeleteTags deleteTags) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public DeleteVpcResponse deleteVpc(DeleteVpc deleteVpc) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } 
- - public DeleteVpnConnectionResponse deleteVpnConnection(DeleteVpnConnection deleteVpnConnection) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public DeleteVpnGatewayResponse deleteVpnGateway(DeleteVpnGateway deleteVpnGateway) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public DescribeBundleTasksResponse describeBundleTasks(DescribeBundleTasks describeBundleTasks) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public DescribeConversionTasksResponse describeConversionTasks(DescribeConversionTasks describeConversionTasks) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public DescribeCustomerGatewaysResponse describeCustomerGateways(DescribeCustomerGateways describeCustomerGateways) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public DescribeDhcpOptionsResponse describeDhcpOptions(DescribeDhcpOptions describeDhcpOptions) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public DescribeLicensesResponse describeLicenses(DescribeLicenses describeLicenses) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public DescribePlacementGroupsResponse describePlacementGroups(DescribePlacementGroups describePlacementGroups) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public DescribeRegionsResponse describeRegions(DescribeRegions describeRegions) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public DescribeReservedInstancesResponse describeReservedInstances(DescribeReservedInstances describeReservedInstances) { - throw new EC2ServiceException(ClientError.Unsupported, "This 
operation is not available"); - } - - public DescribeReservedInstancesOfferingsResponse describeReservedInstancesOfferings(DescribeReservedInstancesOfferings describeReservedInstancesOfferings) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public DescribeSnapshotAttributeResponse describeSnapshotAttribute(DescribeSnapshotAttribute describeSnapshotAttribute) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public DescribeSpotDatafeedSubscriptionResponse describeSpotDatafeedSubscription(DescribeSpotDatafeedSubscription describeSpotDatafeedSubscription) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public DescribeSpotInstanceRequestsResponse describeSpotInstanceRequests(DescribeSpotInstanceRequests describeSpotInstanceRequests) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public DescribeSpotPriceHistoryResponse describeSpotPriceHistory(DescribeSpotPriceHistory describeSpotPriceHistory) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public DescribeSubnetsResponse describeSubnets(DescribeSubnets describeSubnets) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public DescribeTagsResponse describeTags(DescribeTags describeTags) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public DescribeVpcsResponse describeVpcs(DescribeVpcs describeVpcs) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public DescribeVpnConnectionsResponse describeVpnConnections(DescribeVpnConnections describeVpnConnections) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public DescribeVpnGatewaysResponse 
describeVpnGateways(DescribeVpnGateways describeVpnGateways) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public DetachVpnGatewayResponse detachVpnGateway(DetachVpnGateway detachVpnGateway) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public GetConsoleOutputResponse getConsoleOutput(GetConsoleOutput getConsoleOutput) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public ImportInstanceResponse importInstance(ImportInstance importInstance) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public ImportVolumeResponse importVolume(ImportVolume importVolume) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public ModifyInstanceAttributeResponse modifyInstanceAttribute(ModifyInstanceAttribute modifyInstanceAttribute) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public ModifySnapshotAttributeResponse modifySnapshotAttribute(ModifySnapshotAttribute modifySnapshotAttribute) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public PurchaseReservedInstancesOfferingResponse purchaseReservedInstancesOffering(PurchaseReservedInstancesOffering purchaseReservedInstancesOffering) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public RequestSpotInstancesResponse requestSpotInstances(RequestSpotInstances requestSpotInstances) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public ResetInstanceAttributeResponse resetInstanceAttribute(ResetInstanceAttribute resetInstanceAttribute) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - - public 
ResetSnapshotAttributeResponse resetSnapshotAttribute(ResetSnapshotAttribute resetSnapshotAttribute) { - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } -} +/* + * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.cloud.bridge.service; + +import java.util.ArrayList; +import java.util.Calendar; +import java.util.List; +import java.util.UUID; + +import org.apache.commons.codec.binary.Base64; + +import com.amazon.ec2.*; +import com.cloud.bridge.service.core.ec2.EC2Address; +import com.cloud.bridge.service.core.ec2.EC2AddressFilterSet; +import com.cloud.bridge.service.core.ec2.EC2AssociateAddress; +import com.cloud.bridge.service.core.ec2.EC2AuthorizeRevokeSecurityGroup; +import com.cloud.bridge.service.core.ec2.EC2CreateImage; +import com.cloud.bridge.service.core.ec2.EC2CreateImageResponse; +import com.cloud.bridge.service.core.ec2.EC2CreateKeyPair; +import com.cloud.bridge.service.core.ec2.EC2CreateVolume; +import com.cloud.bridge.service.core.ec2.EC2DeleteKeyPair; +import com.cloud.bridge.service.core.ec2.EC2DescribeAddresses; +import com.cloud.bridge.service.core.ec2.EC2DescribeAddressesResponse; +import com.cloud.bridge.service.core.ec2.EC2DescribeAvailabilityZones; +import com.cloud.bridge.service.core.ec2.EC2DescribeAvailabilityZonesResponse; +import com.cloud.bridge.service.core.ec2.EC2DescribeImages; +import 
com.cloud.bridge.service.core.ec2.EC2DescribeImagesResponse; +import com.cloud.bridge.service.core.ec2.EC2DescribeInstances; +import com.cloud.bridge.service.core.ec2.EC2DescribeInstancesResponse; +import com.cloud.bridge.service.core.ec2.EC2DescribeKeyPairs; +import com.cloud.bridge.service.core.ec2.EC2DescribeKeyPairsResponse; +import com.cloud.bridge.service.core.ec2.EC2DescribeSecurityGroups; +import com.cloud.bridge.service.core.ec2.EC2DescribeSecurityGroupsResponse; +import com.cloud.bridge.service.core.ec2.EC2DescribeSnapshots; +import com.cloud.bridge.service.core.ec2.EC2DescribeSnapshotsResponse; +import com.cloud.bridge.service.core.ec2.EC2DescribeVolumes; +import com.cloud.bridge.service.core.ec2.EC2DescribeVolumesResponse; +import com.cloud.bridge.service.core.ec2.EC2DisassociateAddress; +import com.cloud.bridge.service.core.ec2.EC2Engine; +import com.cloud.bridge.service.core.ec2.EC2Filter; +import com.cloud.bridge.service.core.ec2.EC2GroupFilterSet; +import com.cloud.bridge.service.core.ec2.EC2Image; +import com.cloud.bridge.service.core.ec2.EC2ImportKeyPair; +import com.cloud.bridge.service.core.ec2.EC2Instance; +import com.cloud.bridge.service.core.ec2.EC2InstanceFilterSet; +import com.cloud.bridge.service.core.ec2.EC2IpPermission; +import com.cloud.bridge.service.core.ec2.EC2KeyPairFilterSet; +import com.cloud.bridge.service.core.ec2.EC2PasswordData; +import com.cloud.bridge.service.core.ec2.EC2RebootInstances; +import com.cloud.bridge.service.core.ec2.EC2RegisterImage; +import com.cloud.bridge.service.core.ec2.EC2ReleaseAddress; +import com.cloud.bridge.service.core.ec2.EC2RunInstances; +import com.cloud.bridge.service.core.ec2.EC2RunInstancesResponse; +import com.cloud.bridge.service.core.ec2.EC2SSHKeyPair; +import com.cloud.bridge.service.core.ec2.EC2SecurityGroup; +import com.cloud.bridge.service.core.ec2.EC2Snapshot; +import com.cloud.bridge.service.core.ec2.EC2SnapshotFilterSet; +import com.cloud.bridge.service.core.ec2.EC2StartInstances; 
+import com.cloud.bridge.service.core.ec2.EC2StartInstancesResponse; +import com.cloud.bridge.service.core.ec2.EC2StopInstances; +import com.cloud.bridge.service.core.ec2.EC2StopInstancesResponse; +import com.cloud.bridge.service.core.ec2.EC2Volume; +import com.cloud.bridge.service.core.ec2.EC2VolumeFilterSet; +import com.cloud.bridge.service.exception.EC2ServiceException; +import com.cloud.bridge.service.exception.EC2ServiceException.ClientError; +import com.cloud.bridge.util.EC2RestAuth; + + +public class EC2SoapServiceImpl implements AmazonEC2SkeletonInterface { + + private static EC2Engine engine; + + @SuppressWarnings("static-access") + public EC2SoapServiceImpl(EC2Engine engine) { + this.engine = engine; + } + + public AttachVolumeResponse attachVolume(AttachVolume attachVolume) { + EC2Volume request = new EC2Volume(); + AttachVolumeType avt = attachVolume.getAttachVolume(); + + request.setId(avt.getVolumeId()); + request.setInstanceId(avt.getInstanceId()); + request.setDevice( avt.getDevice()); + return toAttachVolumeResponse( engine.attachVolume( request )); + } + + public AuthorizeSecurityGroupIngressResponse authorizeSecurityGroupIngress(AuthorizeSecurityGroupIngress authorizeSecurityGroupIngress) { + AuthorizeSecurityGroupIngressType sgit = authorizeSecurityGroupIngress.getAuthorizeSecurityGroupIngress(); + IpPermissionSetType ipPerms = sgit.getIpPermissions(); + + EC2AuthorizeRevokeSecurityGroup request = toSecurityGroup( sgit.getGroupName(), ipPerms.getItem()); + return toAuthorizeSecurityGroupIngressResponse( engine.authorizeSecurityGroup( request )); + } + + + public RevokeSecurityGroupIngressResponse revokeSecurityGroupIngress( RevokeSecurityGroupIngress revokeSecurityGroupIngress ) + { + RevokeSecurityGroupIngressType sgit = revokeSecurityGroupIngress.getRevokeSecurityGroupIngress(); + IpPermissionSetType ipPerms = sgit.getIpPermissions(); + + EC2AuthorizeRevokeSecurityGroup request = toSecurityGroup( sgit.getGroupName(), ipPerms.getItem()); + 
return toRevokeSecurityGroupIngressResponse( engine.revokeSecurityGroup( request )); + } + + + /** + * Authorize and Revoke Security Group Ingress have the same parameters. + */ + private EC2AuthorizeRevokeSecurityGroup toSecurityGroup( String groupName, IpPermissionType[] items ) { + EC2AuthorizeRevokeSecurityGroup request = new EC2AuthorizeRevokeSecurityGroup(); + + request.setName( groupName ); + + for (IpPermissionType ipPerm : items) { + EC2IpPermission perm = new EC2IpPermission(); + perm.setProtocol( ipPerm.getIpProtocol()); + perm.setFromPort( ipPerm.getFromPort()); + perm.setToPort( ipPerm.getToPort()); + + UserIdGroupPairSetType groups = ipPerm.getGroups(); + if (null != groups && groups.getItem() != null) { + UserIdGroupPairType[] groupItems = groups.getItem(); + for (UserIdGroupPairType groupPair : groupItems) { + EC2SecurityGroup user = new EC2SecurityGroup(); + user.setName( groupPair.getGroupName()); + user.setAccount( groupPair.getUserId()); + perm.addUser( user ); + } + } + + IpRangeSetType ranges = ipPerm.getIpRanges(); + if (ranges != null && ranges.getItem() != null) { + IpRangeItemType[] rangeItems = ranges.getItem(); + for (IpRangeItemType ipRange: rangeItems) + perm.addIpRange( ipRange.getCidrIp() ); + } + + request.addIpPermission( perm ); + } + return request; + } + + public CreateImageResponse createImage(CreateImage createImage) { + EC2CreateImage request = new EC2CreateImage(); + CreateImageType cit = createImage.getCreateImage(); + + request.setInstanceId( cit.getInstanceId()); + request.setName( cit.getName()); + request.setDescription( cit.getDescription()); + return toCreateImageResponse( engine.createImage(request)); + } + + public CreateSecurityGroupResponse createSecurityGroup(CreateSecurityGroup createSecurityGroup) { + CreateSecurityGroupType sgt = createSecurityGroup.getCreateSecurityGroup(); + + return toCreateSecurityGroupResponse( engine.createSecurityGroup(sgt.getGroupName(), sgt.getGroupDescription())); + } + + public 
CreateSnapshotResponse createSnapshot(CreateSnapshot createSnapshot) { + CreateSnapshotType cst = createSnapshot.getCreateSnapshot(); + return toCreateSnapshotResponse( engine.createSnapshot( cst.getVolumeId()), engine); + } + + public CreateVolumeResponse createVolume(CreateVolume createVolume) { + EC2CreateVolume request = new EC2CreateVolume(); + CreateVolumeType cvt = createVolume.getCreateVolume(); + + request.setSize( cvt.getSize()); + request.setSnapshotId(cvt.getSnapshotId() != null ? cvt.getSnapshotId() : null); + request.setZoneName( cvt.getAvailabilityZone()); + return toCreateVolumeResponse( engine.createVolume( request )); + } + + public DeleteSecurityGroupResponse deleteSecurityGroup(DeleteSecurityGroup deleteSecurityGroup) { + DeleteSecurityGroupType sgt = deleteSecurityGroup.getDeleteSecurityGroup(); + return toDeleteSecurityGroupResponse( engine.deleteSecurityGroup( sgt.getGroupName())); + } + + public DeleteSnapshotResponse deleteSnapshot(DeleteSnapshot deleteSnapshot) { + DeleteSnapshotType dst = deleteSnapshot.getDeleteSnapshot(); + return toDeleteSnapshotResponse( engine.deleteSnapshot( dst.getSnapshotId())); + } + + public DeleteVolumeResponse deleteVolume(DeleteVolume deleteVolume) { + EC2Volume request = new EC2Volume(); + DeleteVolumeType avt = deleteVolume.getDeleteVolume(); + + request.setId(avt.getVolumeId()); + return toDeleteVolumeResponse( engine.deleteVolume( request )); + } + + public DeregisterImageResponse deregisterImage(DeregisterImage deregisterImage) { + DeregisterImageType dit = deregisterImage.getDeregisterImage(); + EC2Image image = new EC2Image(); + + image.setId( dit.getImageId()); + return toDeregisterImageResponse( engine.deregisterImage( image )); + } + + public DescribeAvailabilityZonesResponse describeAvailabilityZones(DescribeAvailabilityZones describeAvailabilityZones) { + EC2DescribeAvailabilityZones request = new EC2DescribeAvailabilityZones(); + + DescribeAvailabilityZonesType dazt = 
describeAvailabilityZones.getDescribeAvailabilityZones(); + DescribeAvailabilityZonesSetType dazs = dazt.getAvailabilityZoneSet(); + DescribeAvailabilityZonesSetItemType[] items = dazs.getItem(); + if (null != items) { // -> can be empty + for( int i=0; i < items.length; i++ ) request.addZone( items[i].getZoneName()); + } + return toDescribeAvailabilityZonesResponse( engine.handleRequest( request )); + } + + /** + * This only supports a query about description. + */ + public DescribeImageAttributeResponse describeImageAttribute(DescribeImageAttribute describeImageAttribute) { + EC2DescribeImages request = new EC2DescribeImages(); + DescribeImageAttributeType diat = describeImageAttribute.getDescribeImageAttribute(); + DescribeImageAttributesGroup diag = diat.getDescribeImageAttributesGroup(); + EmptyElementType description = diag.getDescription(); + + if ( null != description ) { + request.addImageSet(diat.getImageId()); + return toDescribeImageAttributeResponse( engine.describeImages( request )); + } + else throw new EC2ServiceException( "Unsupported - only description supported", 501 ); + } + + public DescribeImagesResponse describeImages(DescribeImages describeImages) { + EC2DescribeImages request = new EC2DescribeImages(); + DescribeImagesType dit = describeImages.getDescribeImages(); + + // -> toEC2DescribeImages + DescribeImagesExecutableBySetType param1 = dit.getExecutableBySet(); + if (null != param1) { + DescribeImagesExecutableByType[] items1 = param1.getItem(); + if (null != items1) { + for( int i=0; i < items1.length; i++ ) request.addExecutableBySet( items1[i].getUser()); + } + } + DescribeImagesInfoType param2 = dit.getImagesSet(); + if (null != param2) { + DescribeImagesItemType[] items2 = param2.getItem(); + if (null != items2) { + for( int i=0; i < items2.length; i++ ) request.addImageSet( items2[i].getImageId()); + } + } + DescribeImagesOwnersType param3 = dit.getOwnersSet(); + if (null != param3) { + DescribeImagesOwnerType[] items3 = 
param3.getItem(); + if (null != items3) { + for( int i=0; i < items3.length; i++ ) request.addOwnersSet( items3[i].getOwner()); + } + } + + return toDescribeImagesResponse( engine.describeImages( request )); + } + + public DescribeInstanceAttributeResponse describeInstanceAttribute(DescribeInstanceAttribute describeInstanceAttribute) { + EC2DescribeInstances request = new EC2DescribeInstances(); + DescribeInstanceAttributeType diat = describeInstanceAttribute.getDescribeInstanceAttribute(); + DescribeInstanceAttributesGroup diag = diat.getDescribeInstanceAttributesGroup(); + EmptyElementType instanceType = diag.getInstanceType(); + + // -> toEC2DescribeInstances + if (null != instanceType) { + request.addInstanceId( diat.getInstanceId()); + return toDescribeInstanceAttributeResponse( engine.describeInstances( request )); + } + throw new EC2ServiceException( "Unsupported - only instanceType supported", 501 ); + } + + + public DescribeInstancesResponse describeInstances( DescribeInstances describeInstances ) + { + EC2DescribeInstances request = new EC2DescribeInstances(); + DescribeInstancesType dit = describeInstances.getDescribeInstances(); + FilterSetType fst = dit.getFilterSet(); + + // -> toEC2DescribeInstances + DescribeInstancesInfoType diit = dit.getInstancesSet(); + DescribeInstancesItemType[] items = diit.getItem(); + if (null != items) { // -> can be empty + for( int i=0; i < items.length; i++ ) request.addInstanceId( items[i].getInstanceId()); + } + + if (null != fst) { + request.setFilterSet( toInstanceFilterSet( fst )); + } + + return toDescribeInstancesResponse( engine.describeInstances( request ), engine ); + } + + + @Override + public DescribeAddressesResponse describeAddresses(DescribeAddresses describeAddresses) { + EC2DescribeAddresses ec2Request = new EC2DescribeAddresses(); + DescribeAddressesType dat = describeAddresses.getDescribeAddresses(); + + DescribeAddressesInfoType dait = dat.getPublicIpsSet(); + DescribeAddressesItemType[] items = 
dait.getItem(); + if (items != null) { // -> can be empty + for (DescribeAddressesItemType itemType : items) + ec2Request.addPublicIp( itemType.getPublicIp()); + } + + FilterSetType fset = dat.getFilterSet(); + if (fset != null) { + ec2Request.setFilterSet(toAddressFilterSet(fset)); + } + + return toDescribeAddressesResponse( engine.describeAddresses( ec2Request )); + } + + @Override + public AllocateAddressResponse allocateAddress(AllocateAddress allocateAddress) { + return toAllocateAddressResponse( engine.allocateAddress()); + } + + @Override + public ReleaseAddressResponse releaseAddress(ReleaseAddress releaseAddress) { + EC2ReleaseAddress request = new EC2ReleaseAddress(); + + request.setPublicIp(releaseAddress.getReleaseAddress().getPublicIp()); + + return toReleaseAddressResponse( engine.releaseAddress( request ) ); + } + + @Override + public AssociateAddressResponse associateAddress(AssociateAddress associateAddress) { + EC2AssociateAddress request = new EC2AssociateAddress(); + + request.setPublicIp(associateAddress.getAssociateAddress().getPublicIp()); + request.setInstanceId(associateAddress.getAssociateAddress().getInstanceId()); + + return toAssociateAddressResponse( engine.associateAddress( request ) ); + } + + @Override + public DisassociateAddressResponse disassociateAddress(DisassociateAddress disassociateAddress) { + EC2DisassociateAddress request = new EC2DisassociateAddress(); + + request.setPublicIp(disassociateAddress.getDisassociateAddress().getPublicIp()); + + return toDisassociateAddressResponse( engine.disassociateAddress( request ) ); + } + + public DescribeSecurityGroupsResponse describeSecurityGroups(DescribeSecurityGroups describeSecurityGroups) + { + EC2DescribeSecurityGroups request = new EC2DescribeSecurityGroups(); + + DescribeSecurityGroupsType sgt = describeSecurityGroups.getDescribeSecurityGroups(); + + FilterSetType fst = sgt.getFilterSet(); + + // -> toEC2DescribeSecurityGroups + DescribeSecurityGroupsSetType sgst = 
sgt.getSecurityGroupSet(); + DescribeSecurityGroupsSetItemType[] items = sgst.getItem(); + if (null != items) { // -> can be empty + for (DescribeSecurityGroupsSetItemType item :items) request.addGroupName(item.getGroupName()); + } + + if (null != fst) { + request.setFilterSet( toGroupFilterSet( fst )); + } + + + return toDescribeSecurityGroupsResponse( engine.describeSecurityGroups( request )); + } + + public DescribeSnapshotsResponse describeSnapshots(DescribeSnapshots describeSnapshots) + { + EC2DescribeSnapshots request = new EC2DescribeSnapshots(); + DescribeSnapshotsType dst = describeSnapshots.getDescribeSnapshots(); + + DescribeSnapshotsSetType dsst = dst.getSnapshotSet(); + FilterSetType fst = dst.getFilterSet(); + + if (null != dsst) + { + DescribeSnapshotsSetItemType[] items = dsst.getItem(); + if (null != items) { + for( int i=0; i < items.length; i++ ) request.addSnapshotId( items[i].getSnapshotId()); + } + } + + if (null != fst) + { + String[] timeFilters = new String[1]; + timeFilters[0] = new String( "start-time" ); + request.setFilterSet( toSnapshotFilterSet( fst, timeFilters )); + } + + return toDescribeSnapshotsResponse(engine.handleRequest(request)); + } + + + public DescribeVolumesResponse describeVolumes(DescribeVolumes describeVolumes) + { + EC2DescribeVolumes request = new EC2DescribeVolumes(); + DescribeVolumesType dvt = describeVolumes.getDescribeVolumes(); + + DescribeVolumesSetType dvst = dvt.getVolumeSet(); + FilterSetType fst = dvt.getFilterSet(); + + if (null != dvst) + { + DescribeVolumesSetItemType[] items = dvst.getItem(); + if (null != items) { + for( int i=0; i < items.length; i++ ) request.addVolumeId( items[i].getVolumeId()); + } + } + + if (null != fst) + { + String[] timeFilters = new String[2]; + timeFilters[0] = new String( "attachment.attach-time" ); + timeFilters[1] = new String( "create-time" ); + request.setFilterSet( toVolumeFilterSet( fst, timeFilters )); + } + + return toDescribeVolumesResponse( engine.handleRequest( 
request )); + } + + public DetachVolumeResponse detachVolume(DetachVolume detachVolume) { + EC2Volume request = new EC2Volume(); + DetachVolumeType avt = detachVolume.getDetachVolume(); + + request.setId(avt.getVolumeId()); + request.setInstanceId(avt.getInstanceId()); + request.setDevice( avt.getDevice()); + return toDetachVolumeResponse( engine.detachVolume( request )); + } + + public ModifyImageAttributeResponse modifyImageAttribute(ModifyImageAttribute modifyImageAttribute) { + // TODO: This is broken + EC2Image request = new EC2Image(); + + ModifyImageAttributeType miat = modifyImageAttribute.getModifyImageAttribute(); + ModifyImageAttributeTypeChoice_type0 item = miat.getModifyImageAttributeTypeChoice_type0(); + + AttributeValueType description = item.getDescription(); + /* + LaunchPermissionOperationType launchPermOp = item.getLaunchPermission(); + ProductCodeListType prodCodeList =item.getProductCodes(); + */ + + if (null != description) { + request.setId( miat.getImageId()); + request.setDescription(description.getValue()); + return toModifyImageAttributeResponse( engine.modifyImageAttribute( request )); + } + throw new EC2ServiceException( "Unsupported - can only modify image description", 501 ); + } + + /** + * Did not find a matching service offering so for now we just return disabled + * for each instance request. We could verify that all of the specified instances + * exist to detect an error which would require a listVirtualMachines. 
+ */ + public MonitorInstancesResponse monitorInstances(MonitorInstances monitorInstances) { + MonitorInstancesResponse response = new MonitorInstancesResponse(); + MonitorInstancesResponseType param1 = new MonitorInstancesResponseType(); + MonitorInstancesResponseSetType param2 = new MonitorInstancesResponseSetType(); + + MonitorInstancesType mit = monitorInstances.getMonitorInstances(); + MonitorInstancesSetType mist = mit.getInstancesSet(); + MonitorInstancesSetItemType[] misit = mist.getItem(); + + if (null != misit) { + for( int i=0; i < misit.length; i++ ) { + String instanceId = misit[i].getInstanceId(); + MonitorInstancesResponseSetItemType param3 = new MonitorInstancesResponseSetItemType(); + param3.setInstanceId( instanceId ); + InstanceMonitoringStateType param4 = new InstanceMonitoringStateType(); + param4.setState( "disabled" ); + param3.setMonitoring( param4 ); + param2.addItem( param3 ); + } + } + + param1.setRequestId( UUID.randomUUID().toString()); + param1.setInstancesSet( param2 ); + response.setMonitorInstancesResponse( param1 ); + return response; + } + + public RebootInstancesResponse rebootInstances(RebootInstances rebootInstances) { + EC2RebootInstances request = new EC2RebootInstances(); + RebootInstancesType rit = rebootInstances.getRebootInstances(); + + // -> toEC2StartInstances + RebootInstancesInfoType rist = rit.getInstancesSet(); + RebootInstancesItemType[] items = rist.getItem(); + if (null != items) { // -> should not be empty + for( int i=0; i < items.length; i++ ) request.addInstanceId( items[i].getInstanceId()); + } + return toRebootInstancesResponse( engine.rebootInstances( request )); + } + + + /** + * Processes ec2-register + * + * @param + * + * @see RegisterImage + */ + public RegisterImageResponse registerImage(RegisterImage registerImage) { + EC2RegisterImage request = new EC2RegisterImage(); + RegisterImageType rit = registerImage.getRegisterImage(); + + // -> we redefine the architecture field to hold: 
"format:zonename:osTypeName", + // these are the bare minimum that we need to call the cloud registerTemplate call. + request.setLocation( rit.getImageLocation()); // -> should be a URL for us + request.setName( rit.getName()); + request.setDescription( rit.getDescription()); + request.setArchitecture( rit.getArchitecture()); + return toRegisterImageResponse( engine.registerImage( request )); + } + + /** + * Processes ec2-reset-image-attribute + * + * @param resetImageAttribute + * + * @see ResetInstanceAttribute + */ + + public ResetImageAttributeResponse resetImageAttribute(ResetImageAttribute resetImageAttribute) { + EC2Image request = new EC2Image(); + ResetImageAttributeType riat = resetImageAttribute.getResetImageAttribute(); + + request.setId( riat.getImageId()); + request.setDescription( "" ); + return toResetImageAttributeResponse( engine.modifyImageAttribute( request )); + } + + /** + * ec2-run-instances + * + * @param runInstances + * + * @see RunInstances + */ + public RunInstancesResponse runInstances(RunInstances runInstances) { + RunInstancesType rit = runInstances.getRunInstances(); + GroupSetType gst = rit.getGroupSet(); + PlacementRequestType prt = rit.getPlacement(); + UserDataType userData = rit.getUserData(); + String type = rit.getInstanceType(); + String keyName = rit.getKeyName(); + + EC2RunInstances request = new EC2RunInstances(); + + request.setTemplateId(rit.getImageId()); + request.setMinCount(rit.getMinCount()); + request.setMaxCount(rit.getMaxCount()); + if (null != type) request.setInstanceType(type); + if (null != prt) request.setZoneName(prt.getAvailabilityZone()); + if (null != userData) request.setUserData(userData.getData()); + if (null != keyName) request.setKeyName(rit.getKeyName() ); + + // -> we can only support one group per instance + if (null != gst) { + GroupItemType[] items = gst.getItem(); + if (null != items && 0 < items.length) request.setGroupId( items[0].getGroupId()); + } + return toRunInstancesResponse( 
engine.runInstances( request ), engine); + } + + public StartInstancesResponse startInstances(StartInstances startInstances) { + EC2StartInstances request = new EC2StartInstances(); + StartInstancesType sit = startInstances.getStartInstances(); + + // -> toEC2StartInstances + InstanceIdSetType iist = sit.getInstancesSet(); + InstanceIdType[] items = iist.getItem(); + if (null != items) { // -> should not be empty + for( int i=0; i < items.length; i++ ) request.addInstanceId( items[i].getInstanceId()); + } + return toStartInstancesResponse( engine.startInstances( request )); + } + + public StopInstancesResponse stopInstances(StopInstances stopInstances) { + EC2StopInstances request = new EC2StopInstances(); + StopInstancesType sit = stopInstances.getStopInstances(); + + // -> toEC2StopInstances + InstanceIdSetType iist = sit.getInstancesSet(); + InstanceIdType[] items = iist.getItem(); + if (null != items) { // -> should not be empty + for( int i=0; i < items.length; i++ ) request.addInstanceId( items[i].getInstanceId()); + } + return toStopInstancesResponse( engine.stopInstances( request )); + } + + /** + * Mapping this to the destroyVirtualMachine cloud API concept. + * This makes sense since when considering the rebootInstances function. In reboot + * any terminated instances are left alone. We will do the same with destroyed instances. 
+ */ + public TerminateInstancesResponse terminateInstances(TerminateInstances terminateInstances) { + EC2StopInstances request = new EC2StopInstances(); + TerminateInstancesType sit = terminateInstances.getTerminateInstances(); + + // -> toEC2StopInstances + InstanceIdSetType iist = sit.getInstancesSet(); + InstanceIdType[] items = iist.getItem(); + if (null != items) { // -> should not be empty + for( int i=0; i < items.length; i++ ) request.addInstanceId( items[i].getInstanceId()); + } + + request.setDestroyInstances( true ); + return toTermInstancesResponse( engine.stopInstances( request )); + } + + /** + * See comment for monitorInstances. + */ + public UnmonitorInstancesResponse unmonitorInstances(UnmonitorInstances unmonitorInstances) { + UnmonitorInstancesResponse response = new UnmonitorInstancesResponse(); + MonitorInstancesResponseType param1 = new MonitorInstancesResponseType(); + MonitorInstancesResponseSetType param2 = new MonitorInstancesResponseSetType(); + + MonitorInstancesType mit = unmonitorInstances.getUnmonitorInstances(); + MonitorInstancesSetType mist = mit.getInstancesSet(); + MonitorInstancesSetItemType[] items = mist.getItem(); + + if (null != items) { + for( int i=0; i < items.length; i++ ) { + String instanceId = items[i].getInstanceId(); + MonitorInstancesResponseSetItemType param3 = new MonitorInstancesResponseSetItemType(); + param3.setInstanceId( instanceId ); + InstanceMonitoringStateType param4 = new InstanceMonitoringStateType(); + param4.setState( "disabled" ); + param3.setMonitoring( param4 ); + param2.addItem( param3 ); + } + } + + param1.setInstancesSet( param2 ); + param1.setRequestId( UUID.randomUUID().toString()); + response.setUnmonitorInstancesResponse( param1 ); + return response; + } + + + public static DescribeImageAttributeResponse toDescribeImageAttributeResponse(EC2DescribeImagesResponse engineResponse) { + DescribeImageAttributeResponse response = new DescribeImageAttributeResponse(); + 
DescribeImageAttributeResponseType param1 = new DescribeImageAttributeResponseType(); + + EC2Image[] imageSet = engineResponse.getImageSet(); + if ( 0 < imageSet.length ) { + DescribeImageAttributeResponseTypeChoice_type0 param2 = new DescribeImageAttributeResponseTypeChoice_type0(); + NullableAttributeValueType param3 = new NullableAttributeValueType(); + param3.setValue( imageSet[0].getDescription()); + param2.setDescription( param3 ); + param1.setDescribeImageAttributeResponseTypeChoice_type0( param2 ); + param1.setImageId( imageSet[0].getId()); + } + + param1.setRequestId( UUID.randomUUID().toString()); + response.setDescribeImageAttributeResponse( param1 ); + return response; + } + + public static ModifyImageAttributeResponse toModifyImageAttributeResponse( boolean engineResponse ) { + ModifyImageAttributeResponse response = new ModifyImageAttributeResponse(); + ModifyImageAttributeResponseType param1 = new ModifyImageAttributeResponseType(); + + param1.set_return( engineResponse ); + param1.setRequestId( UUID.randomUUID().toString()); + response.setModifyImageAttributeResponse( param1 ); + return response; + } + + public static ResetImageAttributeResponse toResetImageAttributeResponse( boolean engineResponse ) { + ResetImageAttributeResponse response = new ResetImageAttributeResponse(); + ResetImageAttributeResponseType param1 = new ResetImageAttributeResponseType(); + + param1.set_return( engineResponse ); + param1.setRequestId( UUID.randomUUID().toString()); + response.setResetImageAttributeResponse( param1 ); + return response; + } + + public static DescribeImagesResponse toDescribeImagesResponse(EC2DescribeImagesResponse engineResponse ) { + DescribeImagesResponse response = new DescribeImagesResponse(); + DescribeImagesResponseType param1 = new DescribeImagesResponseType(); + DescribeImagesResponseInfoType param2 = new DescribeImagesResponseInfoType(); + + EC2Image[] images = engineResponse.getImageSet(); + for( int i=0; i < images.length; i++ ) { + 
String accountName = images[i].getAccountName(); + String domainId = images[i].getDomainId(); + String ownerId = domainId + ":" + accountName; + + DescribeImagesResponseItemType param3 = new DescribeImagesResponseItemType(); + param3.setImageId( images[i].getId()); + param3.setImageLocation( "" ); + param3.setImageState( (images[i].getIsReady() ? "available" : "unavailable" )); + param3.setImageOwnerId(ownerId); + param3.setIsPublic( images[i].getIsPublic()); + + ProductCodesSetType param4 = new ProductCodesSetType(); + ProductCodesSetItemType param5 = new ProductCodesSetItemType(); + param5.setProductCode( "" ); + param4.addItem( param5 ); + param3.setProductCodes( param4 ); + + String description = images[i].getDescription(); + param3.setDescription( (null == description ? "" : description)); + + if (null == description) param3.setArchitecture( "" ); + else if (-1 != description.indexOf( "x86_64" )) param3.setArchitecture( "x86_64" ); + else if (-1 != description.indexOf( "i386" )) param3.setArchitecture( "i386" ); + else param3.setArchitecture( "" ); + + param3.setImageType( "machine" ); + param3.setKernelId( "" ); + param3.setRamdiskId( "" ); + param3.setPlatform( "" ); + + StateReasonType param6 = new StateReasonType(); + param6.setCode( "" ); + param6.setMessage( "" ); + param3.setStateReason( param6 ); + + param3.setImageOwnerAlias( "" ); + param3.setName( images[i].getName()); + param3.setRootDeviceType( "" ); + param3.setRootDeviceName( "" ); + + BlockDeviceMappingType param7 = new BlockDeviceMappingType(); + BlockDeviceMappingItemType param8 = new BlockDeviceMappingItemType(); + BlockDeviceMappingItemTypeChoice_type0 param9 = new BlockDeviceMappingItemTypeChoice_type0(); + param8.setDeviceName( "" ); + param9.setVirtualName( "" ); + EbsBlockDeviceType param10 = new EbsBlockDeviceType(); + param10.setSnapshotId( "" ); + param10.setVolumeSize( 0 ); + param10.setDeleteOnTermination( false ); + param9.setEbs( param10 ); + 
param8.setBlockDeviceMappingItemTypeChoice_type0( param9 ); + param7.addItem( param8 ); + + param3.setBlockDeviceMapping( param7 ); + param2.addItem( param3 ); + } + + param1.setImagesSet( param2 ); + param1.setRequestId( UUID.randomUUID().toString()); + response.setDescribeImagesResponse( param1 ); + return response; + } + + public static CreateImageResponse toCreateImageResponse(EC2CreateImageResponse engineResponse) { + CreateImageResponse response = new CreateImageResponse(); + CreateImageResponseType param1 = new CreateImageResponseType(); + + param1.setImageId( engineResponse.getId()); + param1.setRequestId( UUID.randomUUID().toString()); + response.setCreateImageResponse( param1 ); + return response; + } + + public static RegisterImageResponse toRegisterImageResponse(EC2CreateImageResponse engineResponse) { + RegisterImageResponse response = new RegisterImageResponse(); + RegisterImageResponseType param1 = new RegisterImageResponseType(); + + param1.setImageId( engineResponse.getId()); + param1.setRequestId( UUID.randomUUID().toString()); + response.setRegisterImageResponse( param1 ); + return response; + } + + public static DeregisterImageResponse toDeregisterImageResponse( boolean engineResponse) { + DeregisterImageResponse response = new DeregisterImageResponse(); + DeregisterImageResponseType param1 = new DeregisterImageResponseType(); + + param1.set_return( engineResponse ); + param1.setRequestId( UUID.randomUUID().toString()); + response.setDeregisterImageResponse( param1 ); + return response; + } + + // filtersets + private EC2AddressFilterSet toAddressFilterSet( FilterSetType fst ) { + EC2AddressFilterSet vfs = new EC2AddressFilterSet(); + + FilterType[] items = fst.getItem(); + if (items != null) { + // -> each filter can have one or more values associated with it + for (FilterType item : items) { + EC2Filter oneFilter = new EC2Filter(); + String filterName = item.getName(); + oneFilter.setName( filterName ); + + ValueSetType vst = 
item.getValueSet(); + ValueType[] valueItems = vst.getItem(); + for (ValueType valueItem : valueItems) { + oneFilter.addValueEncoded( valueItem.getValue()); + } + vfs.addFilter( oneFilter ); + } + } + return vfs; + } + + private EC2KeyPairFilterSet toKeyPairFilterSet( FilterSetType fst ) + { + EC2KeyPairFilterSet vfs = new EC2KeyPairFilterSet(); + + FilterType[] items = fst.getItem(); + if (items != null) { + // -> each filter can have one or more values associated with it + for (FilterType item : items) { + EC2Filter oneFilter = new EC2Filter(); + String filterName = item.getName(); + oneFilter.setName( filterName ); + + ValueSetType vst = item.getValueSet(); + ValueType[] valueItems = vst.getItem(); + for (ValueType valueItem : valueItems) { + oneFilter.addValueEncoded( valueItem.getValue()); + } + vfs.addFilter( oneFilter ); + } + } + return vfs; + } + + + private EC2VolumeFilterSet toVolumeFilterSet( FilterSetType fst, String[] timeStrs ) + { + EC2VolumeFilterSet vfs = new EC2VolumeFilterSet(); + boolean timeFilter = false; + + FilterType[] items = fst.getItem(); + if (null != items) + { + // -> each filter can have one or more values associated with it + for( int j=0; j < items.length; j++ ) + { + EC2Filter oneFilter = new EC2Filter(); + String filterName = items[j].getName(); + oneFilter.setName( filterName ); + + // -> is the filter one of the xsd:dateTime filters? 
+ timeFilter = false; + for( int m=0; m < timeStrs.length; m++ ) + { + timeFilter = filterName.equalsIgnoreCase( timeStrs[m] ); + if (timeFilter) break; + } + + ValueSetType vst = items[j].getValueSet(); + ValueType[] valueItems = vst.getItem(); + for( int k=0; k < valueItems.length; k++ ) + { + // -> time values are not encoded as regexes + if ( timeFilter ) + oneFilter.addValue( valueItems[k].getValue()); + else oneFilter.addValueEncoded( valueItems[k].getValue()); + } + vfs.addFilter( oneFilter ); + } + } + return vfs; + } + + + private EC2SnapshotFilterSet toSnapshotFilterSet( FilterSetType fst, String[] timeStrs ) + { + EC2SnapshotFilterSet vfs = new EC2SnapshotFilterSet(); + boolean timeFilter = false; + + FilterType[] items = fst.getItem(); + if (null != items) + { + // -> each filter can have one or more values associated with it + for( int j=0; j < items.length; j++ ) + { + EC2Filter oneFilter = new EC2Filter(); + String filterName = items[j].getName(); + oneFilter.setName( filterName ); + + // -> is the filter one of the xsd:dateTime filters? 
+ timeFilter = false; + for( int m=0; m < timeStrs.length; m++ ) + { + timeFilter = filterName.equalsIgnoreCase( timeStrs[m] ); + if (timeFilter) break; + } + + ValueSetType vst = items[j].getValueSet(); + ValueType[] valueItems = vst.getItem(); + for( int k=0; k < valueItems.length; k++ ) + { + // -> time values are not encoded as regexes + if ( timeFilter ) + oneFilter.addValue( valueItems[k].getValue()); + else oneFilter.addValueEncoded( valueItems[k].getValue()); + } + vfs.addFilter( oneFilter ); + } + } + return vfs; + } + + + // TODO make these filter set functions use generics + private EC2GroupFilterSet toGroupFilterSet( FilterSetType fst ) + { + EC2GroupFilterSet gfs = new EC2GroupFilterSet(); + + FilterType[] items = fst.getItem(); + if (null != items) + { + // -> each filter can have one or more values associated with it + for( int j=0; j < items.length; j++ ) + { + EC2Filter oneFilter = new EC2Filter(); + String filterName = items[j].getName(); + oneFilter.setName( filterName ); + + ValueSetType vst = items[j].getValueSet(); + ValueType[] valueItems = vst.getItem(); + for( int k=0; k < valueItems.length; k++ ) + { + oneFilter.addValueEncoded( valueItems[k].getValue()); + } + gfs.addFilter( oneFilter ); + } + } + return gfs; + } + + + private EC2InstanceFilterSet toInstanceFilterSet( FilterSetType fst ) + { + EC2InstanceFilterSet ifs = new EC2InstanceFilterSet(); + + FilterType[] items = fst.getItem(); + if (null != items) + { + // -> each filter can have one or more values associated with it + for( int j=0; j < items.length; j++ ) + { + EC2Filter oneFilter = new EC2Filter(); + String filterName = items[j].getName(); + oneFilter.setName( filterName ); + + ValueSetType vst = items[j].getValueSet(); + ValueType[] valueItems = vst.getItem(); + for( int k=0; k < valueItems.length; k++ ) + { + oneFilter.addValueEncoded( valueItems[k].getValue()); + } + ifs.addFilter( oneFilter ); + } + } + return ifs; + } + + // toMethods + public static 
DescribeVolumesResponse toDescribeVolumesResponse( EC2DescribeVolumesResponse engineResponse ) + { + DescribeVolumesResponse response = new DescribeVolumesResponse(); + DescribeVolumesResponseType param1 = new DescribeVolumesResponseType(); + DescribeVolumesSetResponseType param2 = new DescribeVolumesSetResponseType(); + + EC2Volume[] volumes = engineResponse.getVolumeSet(); + for (EC2Volume vol : volumes) { + DescribeVolumesSetItemResponseType param3 = new DescribeVolumesSetItemResponseType(); + param3.setVolumeId( vol.getId().toString()); + + Long volSize = new Long(vol.getSize()); + param3.setSize(volSize.toString()); + String snapId = vol.getSnapshotId() != null ? vol.getSnapshotId().toString() : ""; + param3.setSnapshotId(snapId); + param3.setAvailabilityZone( vol.getZoneName()); + param3.setStatus( vol.getState()); + + // -> CloudStack seems to have issues with timestamp formats so just in case + Calendar cal = EC2RestAuth.parseDateString(vol.getCreated()); + if (cal == null) { + cal = Calendar.getInstance(); + cal.set( 1970, 1, 1 ); + } + param3.setCreateTime( cal ); + + AttachmentSetResponseType param4 = new AttachmentSetResponseType(); + if (null != vol.getInstanceId()) { + AttachmentSetItemResponseType param5 = new AttachmentSetItemResponseType(); + param5.setVolumeId(vol.getId().toString()); + param5.setInstanceId(vol.getInstanceId().toString()); + String devicePath = engine.cloudDeviceIdToDevicePath( vol.getHypervisor(), vol.getDeviceId()); + param5.setDevice( devicePath ); + param5.setStatus( toVolumeAttachmentState( vol.getInstanceId(), vol.getVMState())); + param5.setAttachTime( cal ); + param5.setDeleteOnTermination( false ); + param4.addItem( param5 ); + } + + param3.setAttachmentSet( param4 ); + + // -> try to generate an empty tag does not seem to work + ResourceTagSetType param6 = new ResourceTagSetType(); + ResourceTagSetItemType param7 = new ResourceTagSetItemType(); + param7.setKey(""); + param7.setValue(""); + param6.addItem( param7 ); + 
param3.setTagSet( param6 ); + param2.addItem( param3 ); + } + param1.setVolumeSet( param2 ); + param1.setRequestId( UUID.randomUUID().toString()); + response.setDescribeVolumesResponse( param1 ); + return response; + } + + + public static DescribeInstanceAttributeResponse toDescribeInstanceAttributeResponse(EC2DescribeInstancesResponse engineResponse) { + DescribeInstanceAttributeResponse response = new DescribeInstanceAttributeResponse(); + DescribeInstanceAttributeResponseType param1 = new DescribeInstanceAttributeResponseType(); + + EC2Instance[] instanceSet = engineResponse.getInstanceSet(); + if (0 < instanceSet.length) { + DescribeInstanceAttributeResponseTypeChoice_type0 param2 = new DescribeInstanceAttributeResponseTypeChoice_type0(); + NullableAttributeValueType param3 = new NullableAttributeValueType(); + param3.setValue( instanceSet[0].getServiceOffering()); + param2.setInstanceType( param3 ); + param1.setDescribeInstanceAttributeResponseTypeChoice_type0( param2 ); + param1.setInstanceId( instanceSet[0].getId()); + } + param1.setRequestId( UUID.randomUUID().toString()); + response.setDescribeInstanceAttributeResponse( param1 ); + return response; + } + + + public static DescribeInstancesResponse toDescribeInstancesResponse(EC2DescribeInstancesResponse engineResponse, EC2Engine engine) + { + DescribeInstancesResponse response = new DescribeInstancesResponse(); + DescribeInstancesResponseType param1 = new DescribeInstancesResponseType(); + ReservationSetType param2 = new ReservationSetType(); + + EC2Instance[] instances = engineResponse.getInstanceSet(); + + for (EC2Instance inst:instances) { + String accountName = inst.getAccountName(); + String domainId = inst.getDomainId(); + String ownerId = domainId + ":" + accountName; + + ReservationInfoType param3 = new ReservationInfoType(); + + param3.setReservationId( inst.getId()); // -> an id we could track down if needed + param3.setOwnerId(ownerId); + param3.setRequesterId( "" ); + + GroupSetType param4 = 
new GroupSetType(); + GroupItemType param5 = new GroupItemType(); + param5.setGroupId( (null == inst.getGroup() ? "" : inst.getGroup())); + param4.addItem( param5 ); + param3.setGroupSet( param4 ); + + RunningInstancesSetType param6 = new RunningInstancesSetType(); + RunningInstancesItemType param7 = new RunningInstancesItemType(); + + param7.setInstanceId( inst.getId()); + param7.setImageId( inst.getTemplateId()); + + InstanceStateType param8 = new InstanceStateType(); + param8.setCode( toAmazonCode( inst.getState())); + param8.setName( toAmazonStateName( inst.getState())); + param7.setInstanceState( param8 ); + + param7.setPrivateDnsName( "" ); + param7.setDnsName( "" ); + param7.setReason( "" ); + param7.setKeyName( "" ); + param7.setAmiLaunchIndex( "" ); + param7.setInstanceType( inst.getServiceOffering()); + + ProductCodesSetType param9 = new ProductCodesSetType(); + ProductCodesSetItemType param10 = new ProductCodesSetItemType(); + param10.setProductCode( "" ); + param9.addItem( param10 ); + param7.setProductCodes( param9 ); + + Calendar cal = inst.getCreated(); + if ( null == cal ) { + cal = Calendar.getInstance(); +// cal.set( 1970, 1, 1 ); + } + param7.setLaunchTime( cal ); + + PlacementResponseType param11 = new PlacementResponseType(); + param11.setAvailabilityZone( inst.getZoneName()); + param11.setGroupName( "" ); + param7.setPlacement( param11 ); + param7.setKernelId( "" ); + param7.setRamdiskId( "" ); + param7.setPlatform( "" ); + + InstanceMonitoringStateType param12 = new InstanceMonitoringStateType(); + param12.setState( "" ); + param7.setMonitoring( param12 ); + param7.setSubnetId( "" ); + param7.setVpcId( "" ); +// String ipAddr = inst.getPrivateIpAddress(); +// param7.setPrivateIpAddress((null != ipAddr ? 
ipAddr : "")); + param7.setPrivateIpAddress(inst.getIpAddress()); + param7.setIpAddress( inst.getIpAddress()); + + StateReasonType param13 = new StateReasonType(); + param13.setCode( "" ); + param13.setMessage( "" ); + param7.setStateReason( param13 ); + param7.setArchitecture( "" ); + param7.setRootDeviceType( "" ); + String devicePath = engine.cloudDeviceIdToDevicePath( inst.getHypervisor(), inst.getRootDeviceId()); + param7.setRootDeviceName( devicePath ); + + InstanceBlockDeviceMappingResponseType param14 = new InstanceBlockDeviceMappingResponseType(); + InstanceBlockDeviceMappingResponseItemType param15 = new InstanceBlockDeviceMappingResponseItemType(); + InstanceBlockDeviceMappingResponseItemTypeChoice_type0 param16 = new InstanceBlockDeviceMappingResponseItemTypeChoice_type0(); + param15.setDeviceName( "" ); + EbsInstanceBlockDeviceMappingResponseType param17 = new EbsInstanceBlockDeviceMappingResponseType(); + param17.setVolumeId( "" ); + param17.setStatus( "" ); + param17.setAttachTime( cal ); + + param17.setDeleteOnTermination( true ); + param16.setEbs( param17 ); + param15.setInstanceBlockDeviceMappingResponseItemTypeChoice_type0( param16 ); + param14.addItem( param15 ); + param7.setBlockDeviceMapping( param14 ); + param7.setInstanceLifecycle( "" ); + param7.setSpotInstanceRequestId( "" ); + + param6.addItem( param7 ); + param3.setInstancesSet( param6 ); + param2.addItem( param3 ); + } + param1.setReservationSet( param2 ); + param1.setRequestId( UUID.randomUUID().toString()); + response.setDescribeInstancesResponse( param1 ); + return response; + } + + + public static DescribeAddressesResponse toDescribeAddressesResponse(EC2DescribeAddressesResponse engineResponse) { + List items = new ArrayList(); + EC2Address[] addressSet = engineResponse.getAddressSet(); + + for (EC2Address addr: addressSet) { + DescribeAddressesResponseItemType item = new DescribeAddressesResponseItemType(); + item.setPublicIp(addr.getIpAddress()); + 
item.setInstanceId(addr.getAssociatedInstanceId()); + items.add(item); + } + DescribeAddressesResponseInfoType descAddrRespInfoType = new DescribeAddressesResponseInfoType(); + descAddrRespInfoType.setItem(items.toArray(new DescribeAddressesResponseItemType[0])); + + DescribeAddressesResponseType descAddrRespType = new DescribeAddressesResponseType(); + descAddrRespType.setRequestId(UUID.randomUUID().toString()); + descAddrRespType.setAddressesSet(descAddrRespInfoType); + + DescribeAddressesResponse descAddrResp = new DescribeAddressesResponse(); + descAddrResp.setDescribeAddressesResponse(descAddrRespType); + + return descAddrResp; + } + + public static AllocateAddressResponse toAllocateAddressResponse(final EC2Address ec2Address) { + AllocateAddressResponse response = new AllocateAddressResponse(); + AllocateAddressResponseType param1 = new AllocateAddressResponseType(); + + param1.setPublicIp(ec2Address.getIpAddress()); + param1.setRequestId(UUID.randomUUID().toString()); + response.setAllocateAddressResponse(param1); + return response; + } + + public static ReleaseAddressResponse toReleaseAddressResponse(final boolean result) { + ReleaseAddressResponse response = new ReleaseAddressResponse(); + ReleaseAddressResponseType param1 = new ReleaseAddressResponseType(); + + param1.set_return(result); + param1.setRequestId(UUID.randomUUID().toString()); + + response.setReleaseAddressResponse(param1); + return response; + } + + public static AssociateAddressResponse toAssociateAddressResponse(final boolean result) { + AssociateAddressResponse response = new AssociateAddressResponse(); + AssociateAddressResponseType param1 = new AssociateAddressResponseType(); + + param1.setRequestId(UUID.randomUUID().toString()); + param1.set_return(result); + + response.setAssociateAddressResponse(param1); + return response; + } + + public static DisassociateAddressResponse toDisassociateAddressResponse(final boolean result) { + DisassociateAddressResponse response = new 
DisassociateAddressResponse(); + DisassociateAddressResponseType param1 = new DisassociateAddressResponseType(); + + param1.setRequestId(UUID.randomUUID().toString()); + param1.set_return(result); + + response.setDisassociateAddressResponse(param1); + return response; + } + + /** + * Map our cloud state values into what Amazon defines. + * Where are the values that can be returned by our cloud api defined? + * + * @param cloudState + * @return + */ + public static int toAmazonCode( String cloudState ) + { + if (null == cloudState) return 48; + + if (cloudState.equalsIgnoreCase( "Destroyed" )) return 48; + else if (cloudState.equalsIgnoreCase( "Stopped" )) return 80; + else if (cloudState.equalsIgnoreCase( "Running" )) return 16; + else if (cloudState.equalsIgnoreCase( "Starting" )) return 0; + else if (cloudState.equalsIgnoreCase( "Stopping" )) return 64; + else if (cloudState.equalsIgnoreCase( "Error" )) return 1; + else if (cloudState.equalsIgnoreCase( "Expunging" )) return 48; + else return 16; + } + + public static String toAmazonStateName( String cloudState ) + { + if (null == cloudState) return new String( "terminated" ); + + if (cloudState.equalsIgnoreCase( "Destroyed" )) return new String( "terminated" ); + else if (cloudState.equalsIgnoreCase( "Stopped" )) return new String( "stopped" ); + else if (cloudState.equalsIgnoreCase( "Running" )) return new String( "running" ); + else if (cloudState.equalsIgnoreCase( "Starting" )) return new String( "pending" ); + else if (cloudState.equalsIgnoreCase( "Stopping" )) return new String( "stopping" ); + else if (cloudState.equalsIgnoreCase( "Error" )) return new String( "error" ); + else if (cloudState.equalsIgnoreCase( "Expunging" )) return new String( "terminated"); + else return new String( "running" ); + } + + /** + * We assume a state for the volume based on what its associated VM is doing. 
+ * + * @param vmId + * @param vmState + * @return + */ + public static String toVolumeAttachmentState(String instanceId, String vmState ) { + if (null == instanceId || null == vmState) return "detached"; + + if (vmState.equalsIgnoreCase( "Destroyed" )) return "detached"; + else if (vmState.equalsIgnoreCase( "Stopped" )) return "attached"; + else if (vmState.equalsIgnoreCase( "Running" )) return "attached"; + else if (vmState.equalsIgnoreCase( "Starting" )) return "attaching"; + else if (vmState.equalsIgnoreCase( "Stopping" )) return "attached"; + else if (vmState.equalsIgnoreCase( "Error" )) return "detached"; + else return "detached"; + } + + public static StopInstancesResponse toStopInstancesResponse(EC2StopInstancesResponse engineResponse) { + StopInstancesResponse response = new StopInstancesResponse(); + StopInstancesResponseType param1 = new StopInstancesResponseType(); + InstanceStateChangeSetType param2 = new InstanceStateChangeSetType(); + + EC2Instance[] instances = engineResponse.getInstanceSet(); + for( int i=0; i < instances.length; i++ ) { + InstanceStateChangeType param3 = new InstanceStateChangeType(); + param3.setInstanceId( instances[i].getId()); + + InstanceStateType param4 = new InstanceStateType(); + param4.setCode( toAmazonCode( instances[i].getState())); + param4.setName( toAmazonStateName( instances[i].getState())); + param3.setCurrentState( param4 ); + + InstanceStateType param5 = new InstanceStateType(); + param5.setCode( toAmazonCode( instances[i].getPreviousState() )); + param5.setName( toAmazonStateName( instances[i].getPreviousState() )); + param3.setPreviousState( param5 ); + + param2.addItem( param3 ); + } + + param1.setRequestId( UUID.randomUUID().toString()); + param1.setInstancesSet( param2 ); + response.setStopInstancesResponse( param1 ); + return response; + } + + public static StartInstancesResponse toStartInstancesResponse(EC2StartInstancesResponse engineResponse) { + StartInstancesResponse response = new 
StartInstancesResponse(); + StartInstancesResponseType param1 = new StartInstancesResponseType(); + InstanceStateChangeSetType param2 = new InstanceStateChangeSetType(); + + EC2Instance[] instances = engineResponse.getInstanceSet(); + for( int i=0; i < instances.length; i++ ) { + InstanceStateChangeType param3 = new InstanceStateChangeType(); + param3.setInstanceId( instances[i].getId()); + + InstanceStateType param4 = new InstanceStateType(); + param4.setCode( toAmazonCode( instances[i].getState())); + param4.setName( toAmazonStateName( instances[i].getState())); + param3.setCurrentState( param4 ); + + InstanceStateType param5 = new InstanceStateType(); + param5.setCode( toAmazonCode( instances[i].getPreviousState() )); + param5.setName( toAmazonStateName( instances[i].getPreviousState() )); + param3.setPreviousState( param5 ); + + param2.addItem( param3 ); + } + + param1.setRequestId( UUID.randomUUID().toString()); + param1.setInstancesSet( param2 ); + response.setStartInstancesResponse( param1 ); + return response; + } + + public static TerminateInstancesResponse toTermInstancesResponse(EC2StopInstancesResponse engineResponse) { + TerminateInstancesResponse response = new TerminateInstancesResponse(); + TerminateInstancesResponseType param1 = new TerminateInstancesResponseType(); + InstanceStateChangeSetType param2 = new InstanceStateChangeSetType(); + + EC2Instance[] instances = engineResponse.getInstanceSet(); + for( int i=0; i < instances.length; i++ ) { + InstanceStateChangeType param3 = new InstanceStateChangeType(); + param3.setInstanceId( instances[i].getId()); + + InstanceStateType param4 = new InstanceStateType(); + param4.setCode( toAmazonCode( instances[i].getState())); + param4.setName( toAmazonStateName( instances[i].getState())); + param3.setCurrentState( param4 ); + + InstanceStateType param5 = new InstanceStateType(); + param5.setCode( toAmazonCode( instances[i].getPreviousState() )); + param5.setName( toAmazonStateName( 
instances[i].getPreviousState() )); + param3.setPreviousState( param5 ); + + param2.addItem( param3 ); + } + + param1.setRequestId( UUID.randomUUID().toString()); + param1.setInstancesSet( param2 ); + response.setTerminateInstancesResponse( param1 ); + return response; + } + + public static RebootInstancesResponse toRebootInstancesResponse(boolean engineResponse) { + RebootInstancesResponse response = new RebootInstancesResponse(); + RebootInstancesResponseType param1 = new RebootInstancesResponseType(); + + param1.setRequestId( UUID.randomUUID().toString()); + param1.set_return( engineResponse ); + response.setRebootInstancesResponse( param1 ); + return response; + } + + public static RunInstancesResponse toRunInstancesResponse(EC2RunInstancesResponse engineResponse, EC2Engine engine ) { + RunInstancesResponse response = new RunInstancesResponse(); + RunInstancesResponseType param1 = new RunInstancesResponseType(); + + param1.setReservationId( "" ); + + GroupSetType param2 = new GroupSetType(); + GroupItemType param3 = new GroupItemType(); + param3.setGroupId( "" ); + param2.addItem( param3 ); + param1.setGroupSet( param2 ); + + RunningInstancesSetType param6 = new RunningInstancesSetType(); + EC2Instance[] instances = engineResponse.getInstanceSet(); + for (EC2Instance inst : instances) { + RunningInstancesItemType param7 = new RunningInstancesItemType(); + param7.setInstanceId( inst.getId()); + param7.setImageId( inst.getTemplateId()); + + String accountName = inst.getAccountName(); + String domainId = inst.getDomainId(); + String ownerId = domainId + ":" + accountName; + + param1.setOwnerId(ownerId); + + InstanceStateType param8 = new InstanceStateType(); + param8.setCode( toAmazonCode( inst.getState())); + param8.setName( toAmazonStateName( inst.getState())); + param7.setInstanceState( param8 ); + + param7.setPrivateDnsName( "" ); + param7.setDnsName( "" ); + param7.setReason( "" ); + param7.setKeyName( "" ); + param7.setAmiLaunchIndex( "" ); + + 
ProductCodesSetType param9 = new ProductCodesSetType(); + ProductCodesSetItemType param10 = new ProductCodesSetItemType(); + param10.setProductCode( "" ); + param9.addItem( param10 ); + param7.setProductCodes( param9 ); + + param7.setInstanceType( inst.getServiceOffering()); + // -> CloudStack seems to have issues with timestamp formats so just in case + Calendar cal = inst.getCreated(); + if ( null == cal ) { + cal = Calendar.getInstance(); + cal.set( 1970, 1, 1 ); + } + param7.setLaunchTime( cal ); + + PlacementResponseType param11 = new PlacementResponseType(); + param11.setAvailabilityZone( inst.getZoneName()); + param7.setPlacement( param11 ); + + param7.setKernelId( "" ); + param7.setRamdiskId( "" ); + param7.setPlatform( "" ); + + InstanceMonitoringStateType param12 = new InstanceMonitoringStateType(); + param12.setState( "" ); + param7.setMonitoring( param12 ); + param7.setSubnetId( "" ); + param7.setVpcId( "" ); + String ipAddr = inst.getPrivateIpAddress(); + param7.setPrivateIpAddress((null != ipAddr ? 
ipAddr : "")); + param7.setIpAddress( inst.getIpAddress()); + + StateReasonType param13 = new StateReasonType(); + param13.setCode( "" ); + param13.setMessage( "" ); + param7.setStateReason( param13 ); + param7.setArchitecture( "" ); + param7.setRootDeviceType( "" ); + param7.setRootDeviceName( "" ); + + InstanceBlockDeviceMappingResponseType param14 = new InstanceBlockDeviceMappingResponseType(); + InstanceBlockDeviceMappingResponseItemType param15 = new InstanceBlockDeviceMappingResponseItemType(); + InstanceBlockDeviceMappingResponseItemTypeChoice_type0 param16 = new InstanceBlockDeviceMappingResponseItemTypeChoice_type0(); + param15.setDeviceName( "" ); + EbsInstanceBlockDeviceMappingResponseType param17 = new EbsInstanceBlockDeviceMappingResponseType(); + param17.setVolumeId( "" ); + param17.setStatus( "" ); + param17.setAttachTime( cal ); + param17.setDeleteOnTermination( true ); + param16.setEbs( param17 ); + param15.setInstanceBlockDeviceMappingResponseItemTypeChoice_type0( param16 ); + param14.addItem( param15 ); + param7.setBlockDeviceMapping( param14 ); + + param7.setInstanceLifecycle( "" ); + param7.setSpotInstanceRequestId( "" ); + param7.setVirtualizationType( "" ); + param7.setClientToken( "" ); + + ResourceTagSetType param18 = new ResourceTagSetType(); + ResourceTagSetItemType param19 = new ResourceTagSetItemType(); + param19.setKey(""); + param19.setValue(""); + param18.addItem( param19 ); + param7.setTagSet( param18 ); + + String hypervisor = inst.getHypervisor(); + param7.setHypervisor((null != hypervisor ? 
hypervisor : "")); + param6.addItem( param7 ); + } + param1.setInstancesSet( param6 ); + param1.setRequesterId( "" ); + + param1.setRequestId( UUID.randomUUID().toString()); + response.setRunInstancesResponse( param1 ); + return response; + } + + public static DescribeAvailabilityZonesResponse toDescribeAvailabilityZonesResponse(EC2DescribeAvailabilityZonesResponse engineResponse) { + DescribeAvailabilityZonesResponse response = new DescribeAvailabilityZonesResponse(); + DescribeAvailabilityZonesResponseType param1 = new DescribeAvailabilityZonesResponseType(); + AvailabilityZoneSetType param2 = new AvailabilityZoneSetType(); + + String[] zones = engineResponse.getZoneSet(); + for (String zone : zones) { + AvailabilityZoneItemType param3 = new AvailabilityZoneItemType(); + AvailabilityZoneMessageSetType param4 = new AvailabilityZoneMessageSetType(); + param3.setZoneName( zone ); + param3.setZoneState( "available" ); + param3.setRegionName( "" ); + param3.setMessageSet( param4 ); + param2.addItem( param3 ); + } + + param1.setRequestId( UUID.randomUUID().toString()); + param1.setAvailabilityZoneInfo( param2 ); + response.setDescribeAvailabilityZonesResponse( param1 ); + return response; + } + + public static AttachVolumeResponse toAttachVolumeResponse(EC2Volume engineResponse) { + AttachVolumeResponse response = new AttachVolumeResponse(); + AttachVolumeResponseType param1 = new AttachVolumeResponseType(); + + Calendar cal = Calendar.getInstance(); + + // -> if the instanceId was not given in the request then we have no way to get it + param1.setVolumeId( engineResponse.getId().toString()); + param1.setInstanceId( engineResponse.getInstanceId().toString()); + param1.setDevice( engineResponse.getDevice()); + if ( null != engineResponse.getState()) + param1.setStatus( engineResponse.getState()); + else param1.setStatus( "" ); // ToDo - throw an Soap Fault + + param1.setAttachTime( cal ); + + param1.setRequestId( UUID.randomUUID().toString()); + 
response.setAttachVolumeResponse( param1 ); + return response; + } + + public static DetachVolumeResponse toDetachVolumeResponse(EC2Volume engineResponse) { + DetachVolumeResponse response = new DetachVolumeResponse(); + DetachVolumeResponseType param1 = new DetachVolumeResponseType(); + Calendar cal = Calendar.getInstance(); + cal.set( 1970, 1, 1 ); // return one value, Unix Epoch, what else can we return? + + param1.setVolumeId( engineResponse.getId().toString()); + param1.setInstanceId( (null == engineResponse.getInstanceId() ? "" : engineResponse.getInstanceId().toString())); + param1.setDevice( (null == engineResponse.getDevice() ? "" : engineResponse.getDevice())); + if ( null != engineResponse.getState()) + param1.setStatus( engineResponse.getState()); + else param1.setStatus( "" ); // ToDo - throw an Soap Fault + + param1.setAttachTime( cal ); + + param1.setRequestId( UUID.randomUUID().toString()); + response.setDetachVolumeResponse( param1 ); + return response; + } + + public static CreateVolumeResponse toCreateVolumeResponse(EC2Volume engineResponse) { + CreateVolumeResponse response = new CreateVolumeResponse(); + CreateVolumeResponseType param1 = new CreateVolumeResponseType(); + + param1.setVolumeId( engineResponse.getId().toString()); + Long volSize = new Long( engineResponse.getSize()); + param1.setSize( volSize.toString()); + param1.setSnapshotId( "" ); + param1.setAvailabilityZone( engineResponse.getZoneName()); + if ( null != engineResponse.getState()) + param1.setStatus( engineResponse.getState()); + else param1.setStatus( "" ); // ToDo - throw an Soap Fault + + // -> CloudStack seems to have issues with timestamp formats so just in case + Calendar cal = EC2RestAuth.parseDateString(engineResponse.getCreated()); + if ( null == cal ) { + cal = Calendar.getInstance(); +// cal.set( 1970, 1, 1 ); + } + param1.setCreateTime( cal ); + + param1.setRequestId( UUID.randomUUID().toString()); + response.setCreateVolumeResponse( param1 ); + return response; + 
} + + public static DeleteVolumeResponse toDeleteVolumeResponse(EC2Volume engineResponse) { + DeleteVolumeResponse response = new DeleteVolumeResponse(); + DeleteVolumeResponseType param1 = new DeleteVolumeResponseType(); + + if ( null != engineResponse.getState()) + param1.set_return( true ); + else param1.set_return( false ); // ToDo - supposed to return an error + + param1.setRequestId( UUID.randomUUID().toString()); + response.setDeleteVolumeResponse( param1 ); + return response; + } + + public static DescribeSnapshotsResponse toDescribeSnapshotsResponse(EC2DescribeSnapshotsResponse engineResponse) { + DescribeSnapshotsResponse response = new DescribeSnapshotsResponse(); + DescribeSnapshotsResponseType param1 = new DescribeSnapshotsResponseType(); + DescribeSnapshotsSetResponseType param2 = new DescribeSnapshotsSetResponseType(); + + EC2Snapshot[] snaps = engineResponse.getSnapshotSet(); + for (EC2Snapshot snap : snaps) { + DescribeSnapshotsSetItemResponseType param3 = new DescribeSnapshotsSetItemResponseType(); + param3.setSnapshotId( snap.getId()); + param3.setVolumeId( snap.getVolumeId()); + + // our semantics are different than those ec2 uses + if (snap.getState().equalsIgnoreCase("backedup")) { + param3.setStatus("completed"); + param3.setProgress("100%"); + } else if (snap.getState().equalsIgnoreCase("creating")) { + param3.setStatus("pending"); + param3.setProgress("33%"); + } else if (snap.getState().equalsIgnoreCase("backingup")) { + param3.setStatus("pending"); + param3.setProgress("66%"); + } else { + // if we see anything besides: backedup/creating/backingup, we assume error + param3.setStatus("error"); + param3.setProgress("0%"); + } +// param3.setStatus( snap.getState()); + + String ownerId = snap.getDomainId() + ":" + snap.getAccountName(); + + // -> CloudStack seems to have issues with timestamp formats so just in case + Calendar cal = snap.getCreated(); + if ( null == cal ) { + cal = Calendar.getInstance(); + cal.set( 1970, 1, 1 ); + } + 
param3.setStartTime( cal ); + + param3.setOwnerId(ownerId); + param3.setVolumeSize( snap.getVolumeSize().toString()); + param3.setDescription( snap.getName()); + param3.setOwnerAlias( snap.getAccountName() ); + + ResourceTagSetType param18 = new ResourceTagSetType(); + ResourceTagSetItemType param19 = new ResourceTagSetItemType(); + param19.setKey(""); + param19.setValue(""); + param18.addItem( param19 ); + param3.setTagSet( param18 ); + param2.addItem( param3 ); + } + + param1.setSnapshotSet( param2 ); + param1.setRequestId( UUID.randomUUID().toString()); + response.setDescribeSnapshotsResponse( param1 ); + return response; + } + + public static DeleteSnapshotResponse toDeleteSnapshotResponse( boolean engineResponse ) { + DeleteSnapshotResponse response = new DeleteSnapshotResponse(); + DeleteSnapshotResponseType param1 = new DeleteSnapshotResponseType(); + + param1.set_return( engineResponse ); + param1.setRequestId( UUID.randomUUID().toString()); + response.setDeleteSnapshotResponse( param1 ); + return response; + } + + public static CreateSnapshotResponse toCreateSnapshotResponse(EC2Snapshot engineResponse, EC2Engine engine ) { + CreateSnapshotResponse response = new CreateSnapshotResponse(); + CreateSnapshotResponseType param1 = new CreateSnapshotResponseType(); + + String accountName = engineResponse.getAccountName(); + String domainId = engineResponse.getDomainId().toString(); + String ownerId = domainId + ":" + accountName; + + param1.setSnapshotId( engineResponse.getId().toString()); + param1.setVolumeId( engineResponse.getVolumeId().toString()); + param1.setStatus( "completed" ); + + // -> CloudStack seems to have issues with timestamp formats so just in case + Calendar cal = engineResponse.getCreated(); + if ( null == cal ) { + cal = Calendar.getInstance(); + cal.set( 1970, 1, 1 ); + } + param1.setStartTime( cal ); + + param1.setProgress( "100" ); + param1.setOwnerId(ownerId); + Long volSize = new Long( engineResponse.getVolumeSize()); + 
param1.setVolumeSize( volSize.toString()); + param1.setDescription( engineResponse.getName()); + param1.setRequestId( UUID.randomUUID().toString()); + response.setCreateSnapshotResponse( param1 ); + return response; + } + + public static DescribeSecurityGroupsResponse toDescribeSecurityGroupsResponse( + EC2DescribeSecurityGroupsResponse engineResponse) { + DescribeSecurityGroupsResponse response = new DescribeSecurityGroupsResponse(); + DescribeSecurityGroupsResponseType param1 = new DescribeSecurityGroupsResponseType(); + SecurityGroupSetType param2 = new SecurityGroupSetType(); + + EC2SecurityGroup[] groups = engineResponse.getGroupSet(); + for (EC2SecurityGroup group : groups) { + SecurityGroupItemType param3 = new SecurityGroupItemType(); + String accountName = group.getAccountName(); + String domainId = group.getDomainId(); + String ownerId = domainId + ":" + accountName; + + param3.setOwnerId(ownerId); + param3.setGroupName(group.getName()); + String desc = group.getDescription(); + param3.setGroupDescription((null != desc ? desc : "")); + + IpPermissionSetType param4 = new IpPermissionSetType(); + EC2IpPermission[] perms = group.getIpPermissionSet(); + for (EC2IpPermission perm : perms) { + // TODO: Fix kludges like this... 
+ if (perm == null) + continue; + IpPermissionType param5 = new IpPermissionType(); + param5.setIpProtocol(perm.getProtocol()); + param5.setFromPort(perm.getFromPort()); + param5.setToPort(perm.getToPort()); + + // -> user groups + EC2SecurityGroup[] userSet = perm.getUserSet(); + if (null == userSet || 0 == userSet.length) { + UserIdGroupPairSetType param8 = new UserIdGroupPairSetType(); + param5.setGroups(param8); + } else { + for (EC2SecurityGroup secGroup : userSet) { + UserIdGroupPairSetType param8 = new UserIdGroupPairSetType(); + UserIdGroupPairType param9 = new UserIdGroupPairType(); + param9.setUserId(secGroup.getAccount()); + param9.setGroupName(secGroup.getName()); + param8.addItem(param9); + param5.setGroups(param8); + } + } + + // -> or CIDR list + String[] rangeSet = perm.getIpRangeSet(); + if (null == rangeSet || 0 == rangeSet.length) { + IpRangeSetType param6 = new IpRangeSetType(); + param5.setIpRanges(param6); + } else { + for (String range : rangeSet) { + // TODO: This needs further attention... 
+ if (range == null) { + range = ""; + } + IpRangeSetType param6 = new IpRangeSetType(); + IpRangeItemType param7 = new IpRangeItemType(); + param7.setCidrIp(range); + param6.addItem(param7); + param5.setIpRanges(param6); + } + } + param4.addItem(param5); + } + param3.setIpPermissions(param4); + param2.addItem(param3); + } + param1.setSecurityGroupInfo(param2); + param1.setRequestId(UUID.randomUUID().toString()); + response.setDescribeSecurityGroupsResponse(param1); + return response; + } + + public static CreateSecurityGroupResponse toCreateSecurityGroupResponse( boolean success ) { + CreateSecurityGroupResponse response = new CreateSecurityGroupResponse(); + CreateSecurityGroupResponseType param1 = new CreateSecurityGroupResponseType(); + + param1.set_return(success); + param1.setRequestId( UUID.randomUUID().toString()); + response.setCreateSecurityGroupResponse( param1 ); + return response; + } + + public static DeleteSecurityGroupResponse toDeleteSecurityGroupResponse( boolean success ) { + DeleteSecurityGroupResponse response = new DeleteSecurityGroupResponse(); + DeleteSecurityGroupResponseType param1 = new DeleteSecurityGroupResponseType(); + + param1.set_return( success ); + param1.setRequestId( UUID.randomUUID().toString()); + response.setDeleteSecurityGroupResponse( param1 ); + return response; + } + + public static AuthorizeSecurityGroupIngressResponse toAuthorizeSecurityGroupIngressResponse( boolean success ) { + AuthorizeSecurityGroupIngressResponse response = new AuthorizeSecurityGroupIngressResponse(); + AuthorizeSecurityGroupIngressResponseType param1 = new AuthorizeSecurityGroupIngressResponseType(); + + param1.set_return( success ); + param1.setRequestId( UUID.randomUUID().toString()); + response.setAuthorizeSecurityGroupIngressResponse( param1 ); + return response; + } + + public static RevokeSecurityGroupIngressResponse toRevokeSecurityGroupIngressResponse( boolean success ) { + RevokeSecurityGroupIngressResponse response = new 
RevokeSecurityGroupIngressResponse(); + RevokeSecurityGroupIngressResponseType param1 = new RevokeSecurityGroupIngressResponseType(); + + param1.set_return( success ); + param1.setRequestId( UUID.randomUUID().toString()); + response.setRevokeSecurityGroupIngressResponse( param1 ); + return response; + } + + public DescribeKeyPairsResponse describeKeyPairs(DescribeKeyPairs describeKeyPairs) { + + EC2DescribeKeyPairs ec2Request = new EC2DescribeKeyPairs(); + + // multiple keynames may be provided + DescribeKeyPairsInfoType kset = describeKeyPairs.getDescribeKeyPairs().getKeySet(); + if (kset != null) { + DescribeKeyPairsItemType[] keyPairKeys = kset.getItem(); + if (keyPairKeys != null) { + for (DescribeKeyPairsItemType key : keyPairKeys) { + ec2Request.addKeyName(key.getKeyName()); + } + } + } + + // multiple filters may be provided + FilterSetType fset = describeKeyPairs.getDescribeKeyPairs().getFilterSet(); + if (fset != null) { + ec2Request.setKeyFilterSet(toKeyPairFilterSet(fset)); + } + + return toDescribeKeyPairs(engine.describeKeyPairs(ec2Request)); + } + + public static DescribeKeyPairsResponse toDescribeKeyPairs(final EC2DescribeKeyPairsResponse response) { + EC2SSHKeyPair[] keyPairs = response.getKeyPairSet(); + + DescribeKeyPairsResponseInfoType respInfoType = new DescribeKeyPairsResponseInfoType(); + if (keyPairs != null && keyPairs.length > 0) { + for (final EC2SSHKeyPair key : keyPairs) { + DescribeKeyPairsResponseItemType respItemType = new DescribeKeyPairsResponseItemType(); + respItemType.setKeyFingerprint(key.getFingerprint()); + respItemType.setKeyName(key.getKeyName()); + respInfoType.addItem(respItemType); + } + } + + DescribeKeyPairsResponseType respType = new DescribeKeyPairsResponseType(); + respType.setRequestId(UUID.randomUUID().toString()); + respType.setKeySet(respInfoType); + + DescribeKeyPairsResponse resp = new DescribeKeyPairsResponse(); + resp.setDescribeKeyPairsResponse(respType); + return resp; + } + + public ImportKeyPairResponse 
importKeyPair(ImportKeyPair importKeyPair) { + String publicKey = importKeyPair.getImportKeyPair().getPublicKeyMaterial(); + if (!publicKey.contains(" ")) + publicKey = new String(Base64.decodeBase64(publicKey.getBytes())); + + EC2ImportKeyPair ec2Request = new EC2ImportKeyPair(); + if (ec2Request != null) { + ec2Request.setKeyName(importKeyPair.getImportKeyPair().getKeyName()); + ec2Request.setPublicKeyMaterial(publicKey); + } + + return toImportKeyPair(engine.importKeyPair(ec2Request)); + } + + public static ImportKeyPairResponse toImportKeyPair(final EC2SSHKeyPair key) { + ImportKeyPairResponseType respType = new ImportKeyPairResponseType(); + respType.setRequestId(UUID.randomUUID().toString()); + respType.setKeyName(key.getKeyName()); + respType.setKeyFingerprint(key.getFingerprint()); + + ImportKeyPairResponse response = new ImportKeyPairResponse(); + response.setImportKeyPairResponse(respType); + + return response; + } + + public CreateKeyPairResponse createKeyPair(CreateKeyPair createKeyPair) { + EC2CreateKeyPair ec2Request = new EC2CreateKeyPair(); + if (ec2Request != null) { + ec2Request.setKeyName(createKeyPair.getCreateKeyPair().getKeyName()); + } + + return toCreateKeyPair(engine.createKeyPair( ec2Request )); + } + + public static CreateKeyPairResponse toCreateKeyPair(final EC2SSHKeyPair key) { + CreateKeyPairResponseType respType = new CreateKeyPairResponseType(); + respType.setRequestId(UUID.randomUUID().toString()); + respType.setKeyName(key.getKeyName()); + respType.setKeyFingerprint(key.getFingerprint()); + respType.setKeyMaterial(key.getPrivateKey()); + + CreateKeyPairResponse response = new CreateKeyPairResponse(); + response.setCreateKeyPairResponse(respType); + + return response; + } + + public DeleteKeyPairResponse deleteKeyPair(DeleteKeyPair deleteKeyPair) { + EC2DeleteKeyPair ec2Request = new EC2DeleteKeyPair(); + ec2Request.setKeyName(deleteKeyPair.getDeleteKeyPair().getKeyName()); + + return 
toDeleteKeyPair(engine.deleteKeyPair(ec2Request)); + } + + public static DeleteKeyPairResponse toDeleteKeyPair(final boolean success) { + DeleteKeyPairResponseType respType = new DeleteKeyPairResponseType(); + respType.setRequestId(UUID.randomUUID().toString()); + respType.set_return(success); + + DeleteKeyPairResponse response = new DeleteKeyPairResponse(); + response.setDeleteKeyPairResponse(respType); + + return response; + } + + public GetPasswordDataResponse getPasswordData(GetPasswordData getPasswordData) { + return toGetPasswordData(engine.getPasswordData(getPasswordData.getGetPasswordData().getInstanceId())); + } + + @SuppressWarnings("serial") + public static GetPasswordDataResponse toGetPasswordData(final EC2PasswordData passwdData) { + return new GetPasswordDataResponse() {{ + setGetPasswordDataResponse(new GetPasswordDataResponseType() {{ + setRequestId(UUID.randomUUID().toString()); + setTimestamp(Calendar.getInstance()); + setPasswordData(passwdData.getEncryptedPassword()); + setInstanceId(passwdData.getInstanceId()); + }}); + }}; + } + + + + + // Actions not yet implemented: + + public ActivateLicenseResponse activateLicense(ActivateLicense activateLicense) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public AssociateDhcpOptionsResponse associateDhcpOptions(AssociateDhcpOptions associateDhcpOptions) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + }; + + public AttachVpnGatewayResponse attachVpnGateway(AttachVpnGateway attachVpnGateway) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public BundleInstanceResponse bundleInstance(BundleInstance bundleInstance) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public CancelBundleTaskResponse cancelBundleTask(CancelBundleTask cancelBundleTask) { + throw new 
EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public CancelConversionTaskResponse cancelConversionTask(CancelConversionTask cancelConversionTask) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public CancelSpotInstanceRequestsResponse cancelSpotInstanceRequests(CancelSpotInstanceRequests cancelSpotInstanceRequests) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public ConfirmProductInstanceResponse confirmProductInstance(ConfirmProductInstance confirmProductInstance) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public CreateCustomerGatewayResponse createCustomerGateway(CreateCustomerGateway createCustomerGateway) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public CreateDhcpOptionsResponse createDhcpOptions(CreateDhcpOptions createDhcpOptions) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public CreatePlacementGroupResponse createPlacementGroup(CreatePlacementGroup createPlacementGroup) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public CreateSpotDatafeedSubscriptionResponse createSpotDatafeedSubscription(CreateSpotDatafeedSubscription createSpotDatafeedSubscription) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public CreateSubnetResponse createSubnet(CreateSubnet createSubnet) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public CreateTagsResponse createTags(CreateTags createTags) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public CreateVpcResponse createVpc(CreateVpc createVpc) { + throw new 
EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public CreateVpnConnectionResponse createVpnConnection(CreateVpnConnection createVpnConnection) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public CreateVpnGatewayResponse createVpnGateway(CreateVpnGateway createVpnGateway) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public DeactivateLicenseResponse deactivateLicense(DeactivateLicense deactivateLicense) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public DeleteCustomerGatewayResponse deleteCustomerGateway(DeleteCustomerGateway deleteCustomerGateway) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public DeleteDhcpOptionsResponse deleteDhcpOptions(DeleteDhcpOptions deleteDhcpOptions) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public DeletePlacementGroupResponse deletePlacementGroup(DeletePlacementGroup deletePlacementGroup) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public DeleteSpotDatafeedSubscriptionResponse deleteSpotDatafeedSubscription(DeleteSpotDatafeedSubscription deleteSpotDatafeedSubscription) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public DeleteSubnetResponse deleteSubnet(DeleteSubnet deleteSubnet) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public DeleteTagsResponse deleteTags(DeleteTags deleteTags) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public DeleteVpcResponse deleteVpc(DeleteVpc deleteVpc) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } 
+ + public DeleteVpnConnectionResponse deleteVpnConnection(DeleteVpnConnection deleteVpnConnection) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public DeleteVpnGatewayResponse deleteVpnGateway(DeleteVpnGateway deleteVpnGateway) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public DescribeBundleTasksResponse describeBundleTasks(DescribeBundleTasks describeBundleTasks) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public DescribeConversionTasksResponse describeConversionTasks(DescribeConversionTasks describeConversionTasks) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public DescribeCustomerGatewaysResponse describeCustomerGateways(DescribeCustomerGateways describeCustomerGateways) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public DescribeDhcpOptionsResponse describeDhcpOptions(DescribeDhcpOptions describeDhcpOptions) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public DescribeLicensesResponse describeLicenses(DescribeLicenses describeLicenses) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public DescribePlacementGroupsResponse describePlacementGroups(DescribePlacementGroups describePlacementGroups) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public DescribeRegionsResponse describeRegions(DescribeRegions describeRegions) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public DescribeReservedInstancesResponse describeReservedInstances(DescribeReservedInstances describeReservedInstances) { + throw new EC2ServiceException(ClientError.Unsupported, "This 
operation is not available"); + } + + public DescribeReservedInstancesOfferingsResponse describeReservedInstancesOfferings(DescribeReservedInstancesOfferings describeReservedInstancesOfferings) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public DescribeSnapshotAttributeResponse describeSnapshotAttribute(DescribeSnapshotAttribute describeSnapshotAttribute) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public DescribeSpotDatafeedSubscriptionResponse describeSpotDatafeedSubscription(DescribeSpotDatafeedSubscription describeSpotDatafeedSubscription) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public DescribeSpotInstanceRequestsResponse describeSpotInstanceRequests(DescribeSpotInstanceRequests describeSpotInstanceRequests) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public DescribeSpotPriceHistoryResponse describeSpotPriceHistory(DescribeSpotPriceHistory describeSpotPriceHistory) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public DescribeSubnetsResponse describeSubnets(DescribeSubnets describeSubnets) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public DescribeTagsResponse describeTags(DescribeTags describeTags) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public DescribeVpcsResponse describeVpcs(DescribeVpcs describeVpcs) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public DescribeVpnConnectionsResponse describeVpnConnections(DescribeVpnConnections describeVpnConnections) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public DescribeVpnGatewaysResponse 
describeVpnGateways(DescribeVpnGateways describeVpnGateways) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public DetachVpnGatewayResponse detachVpnGateway(DetachVpnGateway detachVpnGateway) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public GetConsoleOutputResponse getConsoleOutput(GetConsoleOutput getConsoleOutput) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public ImportInstanceResponse importInstance(ImportInstance importInstance) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public ImportVolumeResponse importVolume(ImportVolume importVolume) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public ModifyInstanceAttributeResponse modifyInstanceAttribute(ModifyInstanceAttribute modifyInstanceAttribute) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public ModifySnapshotAttributeResponse modifySnapshotAttribute(ModifySnapshotAttribute modifySnapshotAttribute) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public PurchaseReservedInstancesOfferingResponse purchaseReservedInstancesOffering(PurchaseReservedInstancesOffering purchaseReservedInstancesOffering) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public RequestSpotInstancesResponse requestSpotInstances(RequestSpotInstances requestSpotInstances) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public ResetInstanceAttributeResponse resetInstanceAttribute(ResetInstanceAttribute resetInstanceAttribute) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } + + public 
ResetSnapshotAttributeResponse resetSnapshotAttribute(ResetSnapshotAttribute resetSnapshotAttribute) { + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } +} \ No newline at end of file diff --git a/awsapi/src/com/cloud/bridge/service/S3Constants.java b/awsapi/src/com/cloud/bridge/service/S3Constants.java index f8e702a5697..ee2a1497f41 100644 --- a/awsapi/src/com/cloud/bridge/service/S3Constants.java +++ b/awsapi/src/com/cloud/bridge/service/S3Constants.java @@ -22,9 +22,6 @@ package com.cloud.bridge.service; public interface S3Constants { public final String BUCKET_ATTR_KEY = "s3-bucket"; public final String OBJECT_ATTR_KEY = "s3-object-key"; -<<<<<<< HEAD -======= public final String PLAIN_POST_ACCESS_KEY = "s3-access-key"; public final String PLAIN_POST_SIGNATURE = "s3-signature"; ->>>>>>> 6472e7b... Now really adding the renamed files! } diff --git a/awsapi/src/com/cloud/bridge/service/S3RestServlet.java b/awsapi/src/com/cloud/bridge/service/S3RestServlet.java index e4ec384a111..09b0c55d2a2 100644 --- a/awsapi/src/com/cloud/bridge/service/S3RestServlet.java +++ b/awsapi/src/com/cloud/bridge/service/S3RestServlet.java @@ -1,768 +1,3 @@ -<<<<<<< HEAD -/* - * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.cloud.bridge.service; - -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.io.OutputStream; -import java.io.InputStream; -import java.io.UnsupportedEncodingException; -import java.security.SignatureException; -import java.sql.SQLException; -import java.util.Enumeration; - -import javax.servlet.http.HttpServlet; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; -import javax.xml.parsers.DocumentBuilder; -import javax.xml.parsers.DocumentBuilderFactory; -import javax.xml.bind.*; - -import org.apache.axis2.AxisFault; -import org.apache.log4j.Logger; -import org.w3c.dom.Document; -import org.w3c.dom.NamedNodeMap; -import org.w3c.dom.Node; -import org.w3c.dom.NodeList; - -import com.cloud.bridge.model.SAcl; -import com.cloud.bridge.persist.PersistContext; -import com.cloud.bridge.persist.dao.UserCredentialsDao; -import com.cloud.bridge.service.controller.s3.S3BucketAction; -import com.cloud.bridge.service.controller.s3.S3ObjectAction; -import com.cloud.bridge.service.core.s3.S3AccessControlList; -import com.cloud.bridge.service.core.s3.S3AuthParams; -import com.cloud.bridge.service.core.s3.S3Engine; -import com.cloud.bridge.service.core.s3.S3Grant; -import com.cloud.bridge.service.core.s3.S3MetaDataEntry; -import com.cloud.bridge.service.core.s3.S3PutObjectRequest; -import com.cloud.bridge.service.core.s3.S3PutObjectResponse; -import com.cloud.bridge.service.exception.InvalidBucketName; -import com.cloud.bridge.service.exception.NoSuchObjectException; -import com.cloud.bridge.service.exception.PermissionDeniedException; -import com.cloud.bridge.util.AuthenticationUtils; -import com.cloud.bridge.util.HeaderParam; -import com.cloud.bridge.util.MultiPartDimeInputStream; -import com.cloud.bridge.util.RestAuth; -import com.cloud.bridge.util.S3SoapAuth; - -/** - * @author Kelven Yang, Mark Joseph - */ -public class S3RestServlet extends HttpServlet { - private static final long 
serialVersionUID = -6168996266762804877L; - - public static final Logger logger = Logger.getLogger(S3RestServlet.class); - - protected void doGet(HttpServletRequest req, HttpServletResponse resp) { - processRequest( req, resp, "GET" ); - } - - protected void doPost(HttpServletRequest req, HttpServletResponse resp) - { - // -> DIME requests are authenticated via the SOAP auth mechanism - String type = req.getHeader( "Content-Type" ); - if ( null != type && type.equalsIgnoreCase( "application/dime" )) - processDimeRequest(req, resp); - else processRequest( req, resp, "POST" ); - } - - protected void doPut(HttpServletRequest req, HttpServletResponse resp) { - processRequest( req, resp, "PUT" ); - } - - protected void doHead(HttpServletRequest req, HttpServletResponse resp) { - processRequest( req, resp, "HEAD" ); - } - - protected void doOptions(HttpServletRequest req, HttpServletResponse resp) { - processRequest( req, resp, "OPTIONS" ); - } - - protected void doDelete( HttpServletRequest req, HttpServletResponse resp ) { - processRequest( req, resp, "DELETE" ); - } - - /** - * POST requests do not get authenticated on entry. The associated - * access key and signature headers are embedded in the message not encoded - * as HTTP headers. 
- */ - private void processRequest( HttpServletRequest request, HttpServletResponse response, String method ) - { - try { - logRequest(request); - - // Our extensions to the S3 REST API for simple management actions - // -> unauthenticated calls, should still be done over HTTPS - String cloudAction = request.getParameter( "CloudAction" ); - if (null != cloudAction) - { - if (cloudAction.equalsIgnoreCase( "SetUserKeys" )) { - setUserKeys(request, response); - return; - } - - if (cloudAction.equalsIgnoreCase( "CloudS3Version" )) { - cloudS3Version(request, response); - return; - } - } - - - // -> authenticated calls - if (!method.equalsIgnoreCase( "POST" )) { - S3AuthParams params = extractRequestHeaders( request ); - authenticateRequest( request, params ); - } - - ServletAction action = routeRequest(request); - if ( action != null ) { - action.execute(request, response); - } - else { - response.setStatus(404); - endResponse(response, "File not found"); - } - - PersistContext.commitTransaction(); - - } - catch( InvalidBucketName e) { - logger.error("Unexpected exception " + e.getMessage(), e); - response.setStatus(400); - endResponse(response, "Invalid Bucket Name - " + e.toString()); - } - catch(PermissionDeniedException e) { - logger.error("Unexpected exception " + e.getMessage(), e); - response.setStatus(403); - endResponse(response, "Access denied - " + e.toString()); - } - catch(Throwable e) { - logger.error("Unexpected exception " + e.getMessage(), e); - response.setStatus(500); - endResponse(response, "Internal server error"); - - } finally { - try { - response.flushBuffer(); - } catch (IOException e) { - logger.error("Unexpected exception " + e.getMessage(), e); - } - PersistContext.closeSession(); - } - } - - /** - * Provide an easy way to determine the version of the implementation running. - * - * This is an unauthenticated REST call. 
- */ - private void cloudS3Version( HttpServletRequest request, HttpServletResponse response ) { - String version = new String( "1.04" ); - response.setStatus(200); - endResponse(response, version); - } - - /** - * This request registers the user Cloud.com account holder to the S3 service. The Cloud.com - * account holder saves his API access and secret keys with the S3 service so that - * each rest call he makes can be verified was originated from him. The given API access - * and secret key are saved into the "usercredentials" database table. - * - * This is an unauthenticated REST call. The only required parameters are 'accesskey' and - * 'secretkey'. - * - * To verify that the given keys represent an existing account they are used to execute the - * Cloud.com's listAccounts API function. If the keys do not represent a valid account the - * listAccounts function will fail. - * - * A user can call this REST function any number of times, on each call the Cloud.com secret - * key is simply over writes any previously stored value. - * - * As with all REST calls HTTPS should be used to ensure their security. 
- */ - private void setUserKeys( HttpServletRequest request, HttpServletResponse response ) { - String[] accessKey = null; - String[] secretKey = null; - - try { - // -> all these parameters are required - accessKey = request.getParameterValues( "accesskey" ); - if ( null == accessKey || 0 == accessKey.length ) { - response.sendError(530, "Missing accesskey parameter" ); - return; - } - - secretKey = request.getParameterValues( "secretkey" ); - if ( null == secretKey || 0 == secretKey.length ) { - response.sendError(530, "Missing secretkey parameter" ); - return; - } - } catch( Exception e ) { - logger.error("SetUserKeys exception " + e.getMessage(), e); - response.setStatus(500); - endResponse(response, "SetUserKeys exception " + e.getMessage()); - return; - } - - try { - // -> use the keys to see if the account actually exists - //ServiceProvider.getInstance().getEC2Engine().validateAccount( accessKey[0], secretKey[0] ); - UserCredentialsDao credentialDao = new UserCredentialsDao(); - credentialDao.setUserKeys( accessKey[0], secretKey[0] ); - - } catch( Exception e ) { - logger.error("SetUserKeys " + e.getMessage(), e); - response.setStatus(401); - endResponse(response, e.toString()); - return; - } - response.setStatus(200); - endResponse(response, "User keys set successfully"); - } - - /** - * We are using the S3AuthParams class to hide where the header values are coming - * from so that the authenticateRequest call can be made from several places. 
- */ - public static S3AuthParams extractRequestHeaders( HttpServletRequest request ) { - S3AuthParams params = new S3AuthParams(); - - Enumeration headers = request.getHeaderNames(); - if (null != headers) - { - while( headers.hasMoreElements()) - { - HeaderParam oneHeader = new HeaderParam(); - String headerName = (String)headers.nextElement(); - oneHeader.setName( headerName ); - oneHeader.setValue( request.getHeader( headerName )); - params.addHeader( oneHeader ); - } - } - return params; - } - - public static void authenticateRequest( HttpServletRequest request, S3AuthParams params ) - throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException - { - RestAuth auth = new RestAuth(ServiceProvider.getInstance().getUseSubDomain()); - String AWSAccessKey = null; - String signature = null; - String authorization = null; - - // [A] Is it an annonymous request? - if (null == (authorization = params.getHeader( "Authorization" ))) { - UserContext.current().initContext(); - return; - } - - // [B] Is it an authenticated request? - int offset = authorization.indexOf( "AWS" ); - if (-1 != offset) { - String temp = authorization.substring( offset+3 ).trim(); - offset = temp.indexOf( ":" ); - AWSAccessKey = temp.substring( 0, offset ); - signature = temp.substring( offset+1 ); - } - - // [C] Calculate the signature from the request's headers - auth.setDateHeader( request.getHeader( "Date" )); - auth.setContentTypeHeader( request.getHeader( "Content-Type" )); - auth.setContentMD5Header( request.getHeader( "Content-MD5" )); - auth.setHostHeader( request.getHeader( "Host" )); - auth.setQueryString( request.getQueryString()); - auth.addUriPath( request.getRequestURI()); - - // -> are their any Amazon specific (i.e. 'x-amz-' ) headers? 
- HeaderParam[] headers = params.getHeaders(); - for( int i=0; null != headers && i < headers.length; i++ ) - { - String headerName = headers[i].getName(); - String ignoreCase = headerName.toLowerCase(); - if (ignoreCase.startsWith( "x-amz-" )) - auth.addAmazonHeader( headerName + ":" + headers[i].getValue()); - } - - UserInfo info = ServiceProvider.getInstance().getUserInfo(AWSAccessKey); - if (info == null) throw new PermissionDeniedException("Unable to authenticate access key: " + AWSAccessKey); - - try { - if (auth.verifySignature( request.getMethod(), info.getSecretKey(), signature )) { - UserContext.current().initContext(AWSAccessKey, info.getSecretKey(), AWSAccessKey, info.getDescription(), request); - return; - } - - // -> turn off auth - just for testing - //UserContext.current().initContext("Mark", "123", "Mark", "testing", request); - //return; - - } catch (SignatureException e) { - throw new PermissionDeniedException(e); - - } catch (UnsupportedEncodingException e) { - throw new PermissionDeniedException(e); - } - throw new PermissionDeniedException("Invalid signature"); - } - - - private ServletAction routeRequest(HttpServletRequest request) - { - // Simple URL routing for S3 REST calls. 
- String pathInfo = request.getPathInfo(); - String bucketName = null; - String key = null; - - if (ServiceProvider.getInstance().getUseSubDomain()) - { - String serviceEndpoint = ServiceProvider.getInstance().getServiceEndpoint(); - String host = request.getHeader("Host"); - - // -> a request of "/" on the service endpoint means do a list all my buckets command - if (serviceEndpoint.equalsIgnoreCase( host )) { - request.setAttribute(S3Constants.BUCKET_ATTR_KEY, "/"); - return new S3BucketAction(); - } - - // -> verify the format of the bucket name - int endPos = host.indexOf( ServiceProvider.getInstance().getMasterDomain()); - if ( endPos > 0 ) - { - bucketName = host.substring(0, endPos); - S3Engine.verifyBucketName( bucketName, false ); - request.setAttribute(S3Constants.BUCKET_ATTR_KEY, bucketName); - } - else request.setAttribute(S3Constants.BUCKET_ATTR_KEY, ""); - - if (pathInfo == null || pathInfo.equalsIgnoreCase("/")) - { - return new S3BucketAction(); - } - else { - String objectKey = pathInfo.substring(1); - request.setAttribute(S3Constants.OBJECT_ATTR_KEY, objectKey); - return new S3ObjectAction(); - } - } - else - { - if(pathInfo == null || pathInfo.equalsIgnoreCase("/")) { - logger.warn("Invalid REST request URI " + pathInfo); - return null; - } - - int endPos = pathInfo.indexOf('/', 1); - if ( endPos > 0 ) - { - bucketName = pathInfo.substring(1, endPos); - key = pathInfo.substring(endPos + 1); - S3Engine.verifyBucketName( bucketName, false ); - - if (!key.isEmpty()) - { - request.setAttribute(S3Constants.BUCKET_ATTR_KEY, bucketName); - request.setAttribute(S3Constants.OBJECT_ATTR_KEY, pathInfo.substring(endPos + 1)); - return new S3ObjectAction(); - } - else { - request.setAttribute(S3Constants.BUCKET_ATTR_KEY, bucketName); - return new S3BucketAction(); - } - } - else { - String bucket = pathInfo.substring(1); - request.setAttribute(S3Constants.BUCKET_ATTR_KEY, bucket); - return new S3BucketAction(); - } - } - } - - public static void 
endResponse(HttpServletResponse response, String content) { - try { - byte[] data = content.getBytes(); - response.setContentLength(data.length); - OutputStream os = response.getOutputStream(); - os.write(data); - os.close(); - } catch(Throwable e) { - logger.error("Unexpected exception " + e.getMessage(), e); - } - } - - public static void writeResponse(HttpServletResponse response, String content) throws IOException { - byte[] data = content.getBytes(); - OutputStream os = response.getOutputStream(); - os.write(data); - } - - public static void writeResponse(HttpServletResponse response, InputStream is) throws IOException { - byte[] data = new byte[4096]; - int length = 0; - while((length = is.read(data)) > 0) { - response.getOutputStream().write(data, 0, length); - } - } - - /** - * A DIME request is really a SOAP request that we are dealing with, and so its - * authentication is the SOAP authentication approach. Since Axis2 does not handle - * DIME messages we deal with them here. - * - * @param request - * @param response - */ - private void processDimeRequest(HttpServletRequest request, HttpServletResponse response) { - S3PutObjectRequest putRequest = null; - S3PutObjectResponse putResponse = null; - int bytesRead = 0; - - S3Engine engine = new S3Engine(); - - try { - logRequest(request); - - MultiPartDimeInputStream ds = new MultiPartDimeInputStream( request.getInputStream()); - - // -> the first stream MUST be the SOAP party - if (ds.nextInputStream()) - { - //logger.debug( "DIME msg [" + ds.getStreamType() + "," + ds.getStreamTypeFormat() + "," + ds.getStreamId() + "]" ); - byte[] buffer = new byte[8192]; - bytesRead = ds.read( buffer, 0, 8192 ); - //logger.debug( "DIME SOAP Bytes read: " + bytesRead ); - ByteArrayInputStream bis = new ByteArrayInputStream( buffer, 0, bytesRead ); - putRequest = toEnginePutObjectRequest( bis ); - } - - // -> we only need to support a DIME message with two bodyparts - if (null != putRequest && ds.nextInputStream()) - { - 
InputStream is = ds.getInputStream(); - putRequest.setData( is ); - } - - // -> need to do SOAP level auth here, on failure return the SOAP fault - StringBuffer xml = new StringBuffer(); - String AWSAccessKey = putRequest.getAccessKey(); - UserInfo info = ServiceProvider.getInstance().getUserInfo(AWSAccessKey); - try - { S3SoapAuth.verifySignature( putRequest.getSignature(), "PutObject", putRequest.getRawTimestamp(), AWSAccessKey, info.getSecretKey()); - - } catch( AxisFault e ) { - String reason = e.toString(); - int start = reason.indexOf( ".AxisFault:" ); - if (-1 != start) reason = reason.substring( start+11 ); - - xml.append( "" ); - xml.append( "\n" ); - xml.append( "\n" ); - xml.append( "\n" ); - xml.append( "" ).append( e.getFaultCode().toString()).append( "\n" ); - xml.append( "" ).append( reason ).append( "\n" ); - xml.append( "\n" ); - xml.append( "" ); - - endResponse(response, xml.toString()); - PersistContext.commitTransaction(); - return; - } - - // -> PutObject S3 Bucket Policy would be done in the engine.handleRequest() call - UserContext.current().initContext( AWSAccessKey, info.getSecretKey(), AWSAccessKey, "S3 DIME request", request ); - putResponse = engine.handleRequest( putRequest ); - - xml.append( "" ); - xml.append( "" ); - xml.append( "" ); - xml.append( "" ); - xml.append( "" ); - xml.append( "\"").append( putResponse.getETag()).append( "\"" ); - xml.append( "").append( DatatypeConverter.printDateTime(putResponse.getLastModified())).append( "" ); - xml.append( "" ); - xml.append( "" ); - - endResponse(response, xml.toString()); - PersistContext.commitTransaction(); - } - catch(PermissionDeniedException e) { - logger.error("Unexpected exception " + e.getMessage(), e); - response.setStatus(403); - endResponse(response, "Access denied"); - } - catch(Throwable e) - { - logger.error("Unexpected exception " + e.getMessage(), e); - } - finally - { - PersistContext.closeSession(); - } - } - - - /** - * Convert the SOAP XML we extract from the 
DIME message into our local object. - * Here Axis2 is not parsing the SOAP for us. I tried to use the Amazon PutObject - * parser but it keep throwing exceptions. - * - * @param putObjectInline - * @return - * @throws Exception - */ - public static S3PutObjectRequest toEnginePutObjectRequest( InputStream is ) throws Exception - { - DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance(); - dbf.setNamespaceAware( true ); - - DocumentBuilder db = dbf.newDocumentBuilder(); - Document doc = db.parse( is ); - Node parent = null; - Node contents = null; - NodeList children = null; - String temp = null; - String element = null; - int count = 0; - - S3PutObjectRequest request = new S3PutObjectRequest(); - - // [A] Pull out the simple nodes first - NodeList part = getElement( doc, "http://s3.amazonaws.com/doc/2006-03-01/", "Bucket" ); - if (null != part) - { - if (null != (contents = part.item( 0 ))) - request.setBucketName( contents.getFirstChild().getNodeValue()); - } - part = getElement( doc, "http://s3.amazonaws.com/doc/2006-03-01/", "Key" ); - if (null != part) - { - if (null != (contents = part.item( 0 ))) - request.setKey( contents.getFirstChild().getNodeValue()); - } - part = getElement( doc, "http://s3.amazonaws.com/doc/2006-03-01/", "ContentLength" ); - if (null != part) - { - if (null != (contents = part.item( 0 ))) - { - String length = contents.getFirstChild().getNodeValue(); - if (null != length) request.setContentLength( Long.decode( length )); - } - } - part = getElement( doc, "http://s3.amazonaws.com/doc/2006-03-01/", "AWSAccessKeyId" ); - if (null != part) - { - if (null != (contents = part.item( 0 ))) - request.setAccessKey( contents.getFirstChild().getNodeValue()); - } - part = getElement( doc, "http://s3.amazonaws.com/doc/2006-03-01/", "Signature" ); - if (null != part) - { - if (null != (contents = part.item( 0 ))) - request.setSignature( contents.getFirstChild().getNodeValue()); - } - part = getElement( doc, 
"http://s3.amazonaws.com/doc/2006-03-01/", "Timestamp" ); - if (null != part) - { - if (null != (contents = part.item( 0 ))) - request.setRawTimestamp( contents.getFirstChild().getNodeValue()); - } - part = getElement( doc, "http://s3.amazonaws.com/doc/2006-03-01/", "StorageClass" ); - if (null != part) - { - if (null != (contents = part.item( 0 ))) - request.setStorageClass( contents.getFirstChild().getNodeValue()); - } - part = getElement( doc, "http://s3.amazonaws.com/doc/2006-03-01/", "Credential" ); - if (null != part) - { - if (null != (contents = part.item( 0 ))) - request.setCredential( contents.getFirstChild().getNodeValue()); - } - - - // [B] Get a list of all 'Metadata' elements - part = getElement( doc, "http://s3.amazonaws.com/doc/2006-03-01/", "Metadata" ); - if (null != part) - { - count = part.getLength(); - S3MetaDataEntry[] metaEntry = new S3MetaDataEntry[ count ]; - - for( int i=0; i < count; i++ ) - { - parent = part.item(i); - metaEntry[i] = new S3MetaDataEntry(); - - // -> get a list of all the children elements of the 'Metadata' parent element - if (null != (children = parent.getChildNodes())) - { - int numChildren = children.getLength(); - for( int j=0; j < numChildren; j++ ) - { - contents = children.item( j ); - element = contents.getNodeName().trim(); - if ( element.endsWith( "Name" )) - { - temp = contents.getFirstChild().getNodeValue(); - if (null != temp) metaEntry[i].setName( temp ); - } - else if (element.endsWith( "Value" )) - { - temp = contents.getFirstChild().getNodeValue(); - if (null != temp) metaEntry[i].setValue( temp ); - } - } - } - } - request.setMetaEntries( metaEntry ); - } - - // [C] Get a list of all Grant elements in an AccessControlList - part = getElement( doc, "http://s3.amazonaws.com/doc/2006-03-01/", "Grant" ); - if (null != part) - { - S3AccessControlList engineAcl = new S3AccessControlList(); - - count = part.getLength(); - for( int i=0; i < count; i++ ) - { - parent = part.item(i); - S3Grant engineGrant = new 
S3Grant(); - - // -> get a list of all the children elements of the 'Grant' parent element - if (null != (children = parent.getChildNodes())) - { - int numChildren = children.getLength(); - for( int j=0; j < numChildren; j++ ) - { - contents = children.item( j ); - element = contents.getNodeName().trim(); - if ( element.endsWith( "Grantee" )) - { - NamedNodeMap attbs = contents.getAttributes(); - if (null != attbs) - { - Node type = attbs.getNamedItemNS( "http://www.w3.org/2001/XMLSchema-instance", "type" ); - if ( null != type ) - temp = type.getFirstChild().getNodeValue().trim(); - else temp = null; - - if ( null != temp && temp.equalsIgnoreCase( "CanonicalUser" )) - { - engineGrant.setGrantee(SAcl.GRANTEE_USER); - engineGrant.setCanonicalUserID( getChildNodeValue( contents, "ID" )); - } - else throw new UnsupportedOperationException( "Missing http://www.w3.org/2001/XMLSchema-instance:type value" ); - } - } - else if (element.endsWith( "Permission" )) - { - temp = contents.getFirstChild().getNodeValue().trim(); - if (temp.equalsIgnoreCase("READ" )) engineGrant.setPermission(SAcl.PERMISSION_READ); - else if (temp.equalsIgnoreCase("WRITE" )) engineGrant.setPermission(SAcl.PERMISSION_WRITE); - else if (temp.equalsIgnoreCase("READ_ACP" )) engineGrant.setPermission(SAcl.PERMISSION_READ_ACL); - else if (temp.equalsIgnoreCase("WRITE_ACP" )) engineGrant.setPermission(SAcl.PERMISSION_WRITE_ACL); - else if (temp.equalsIgnoreCase("FULL_CONTROL")) engineGrant.setPermission(SAcl.PERMISSION_FULL); - else throw new UnsupportedOperationException( "Unsupported permission: " + temp ); - } - } - engineAcl.addGrant( engineGrant ); - } - } - request.setAcl( engineAcl ); - } - return request; - } - - /** - * Have to deal with XML with and without namespaces. 
- */ - public static NodeList getElement( Document doc, String namespace, String tagName ) - { - NodeList part = doc.getElementsByTagNameNS( namespace, tagName ); - if (null == part || 0 == part.getLength()) part = doc.getElementsByTagName( tagName ); - - return part; - } - - /** - * Looking for the value of a specific child of the given parent node. - * - * @param parent - * @param childName - * @return - */ - private static String getChildNodeValue( Node parent, String childName ) - { - NodeList children = null; - Node element = null; - - if (null != (children = parent.getChildNodes())) - { - int numChildren = children.getLength(); - for( int i=0; i < numChildren; i++ ) - { - if (null != (element = children.item( i ))) - { - // -> name may have a namespace on it - String name = element.getNodeName().trim(); - if ( name.endsWith( childName )) - { - String value = element.getFirstChild().getNodeValue(); - if (null != value) value = value.trim(); - return value; - } - } - } - } - return null; - } - - private void logRequest(HttpServletRequest request) { - if(logger.isInfoEnabled()) { - logger.info("Request method: " + request.getMethod()); - logger.info("Request contextPath: " + request.getContextPath()); - logger.info("Request pathInfo: " + request.getPathInfo()); - logger.info("Request pathTranslated: " + request.getPathTranslated()); - logger.info("Request queryString: " + request.getQueryString()); - logger.info("Request requestURI: " + request.getRequestURI()); - logger.info("Request requestURL: " + request.getRequestURL()); - logger.info("Request servletPath: " + request.getServletPath()); - Enumeration headers = request.getHeaderNames(); - if(headers != null) { - while(headers.hasMoreElements()) { - Object headerName = headers.nextElement(); - logger.info("Request header " + headerName + ":" + request.getHeader((String)headerName)); - } - } - - Enumeration params = request.getParameterNames(); - if(params != null) { - while(params.hasMoreElements()) { - 
Object paramName = params.nextElement(); - logger.info("Request parameter " + paramName + ":" + - request.getParameter((String)paramName)); - } - } - logger.info( "- End of request -" ); - } - } -} -======= /* * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. * @@ -1600,5 +835,4 @@ private S3ObjectAction routePlainPostRequest (HttpServletRequest request) logger.info( "- End of request -" ); } } -} ->>>>>>> 6472e7b... Now really adding the renamed files! +} \ No newline at end of file diff --git a/awsapi/src/com/cloud/bridge/service/UserContext.java b/awsapi/src/com/cloud/bridge/service/UserContext.java index ff5b7d80e93..9fdc06ca134 100644 --- a/awsapi/src/com/cloud/bridge/service/UserContext.java +++ b/awsapi/src/com/cloud/bridge/service/UserContext.java @@ -33,11 +33,7 @@ public class UserContext { private boolean annonymous = false; private String accessKey; private String secretKey; -<<<<<<< HEAD - private String canonicalUserId; // -> for us this is the accessKey -======= private String canonicalUserId; // In our design, we re-use the accessKey to provide the canonicalUserId -- TODO loPri - reconsider? ->>>>>>> 6472e7b... Now really adding the renamed files! private String description; private HttpServletRequest request = null; diff --git a/awsapi/src/com/cloud/bridge/service/controller/s3/S3BucketAction.java b/awsapi/src/com/cloud/bridge/service/controller/s3/S3BucketAction.java index 0e0d215678c..c614509c3cb 100644 --- a/awsapi/src/com/cloud/bridge/service/controller/s3/S3BucketAction.java +++ b/awsapi/src/com/cloud/bridge/service/controller/s3/S3BucketAction.java @@ -1,919 +1,3 @@ -<<<<<<< HEAD -/* - * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.cloud.bridge.service.controller.s3; - -import java.io.BufferedReader; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.OutputStream; -import java.io.Reader; -import java.io.StringWriter; -import java.io.Writer; -import java.util.Calendar; - -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; -import javax.xml.bind.DatatypeConverter; -import javax.xml.namespace.QName; -import javax.xml.parsers.DocumentBuilder; -import javax.xml.parsers.DocumentBuilderFactory; -import javax.xml.stream.XMLOutputFactory; -import javax.xml.stream.XMLStreamException; -import javax.xml.stream.XMLStreamWriter; - -import org.apache.axiom.om.OMAbstractFactory; -import org.apache.axiom.om.OMFactory; -import org.apache.axis2.databinding.utils.writer.MTOMAwareXMLSerializer; -import org.apache.log4j.Logger; -import org.json.simple.parser.ParseException; -import org.w3c.dom.Document; -import org.w3c.dom.Node; -import org.w3c.dom.NodeList; - -import com.amazon.s3.GetBucketAccessControlPolicyResponse; -import com.amazon.s3.ListAllMyBucketsResponse; -import com.amazon.s3.ListBucketResponse; -import com.cloud.bridge.model.SAcl; -import com.cloud.bridge.model.SBucket; -import com.cloud.bridge.persist.dao.BucketPolicyDao; -import com.cloud.bridge.persist.dao.MultipartLoadDao; -import com.cloud.bridge.persist.dao.SBucketDao; -import com.cloud.bridge.service.S3Constants; -import com.cloud.bridge.service.S3RestServlet; -import com.cloud.bridge.service.S3SoapServiceImpl; 
-import com.cloud.bridge.service.ServiceProvider; -import com.cloud.bridge.service.ServletAction; -import com.cloud.bridge.service.UserContext; -import com.cloud.bridge.service.core.s3.S3AccessControlPolicy; -import com.cloud.bridge.service.core.s3.S3BucketPolicy; -import com.cloud.bridge.service.core.s3.S3CanonicalUser; -import com.cloud.bridge.service.core.s3.S3CreateBucketConfiguration; -import com.cloud.bridge.service.core.s3.S3CreateBucketRequest; -import com.cloud.bridge.service.core.s3.S3CreateBucketResponse; -import com.cloud.bridge.service.core.s3.S3DeleteBucketRequest; -import com.cloud.bridge.service.core.s3.S3Engine; -import com.cloud.bridge.service.core.s3.S3GetBucketAccessControlPolicyRequest; -import com.cloud.bridge.service.core.s3.S3ListAllMyBucketsRequest; -import com.cloud.bridge.service.core.s3.S3ListAllMyBucketsResponse; -import com.cloud.bridge.service.core.s3.S3ListBucketObjectEntry; -import com.cloud.bridge.service.core.s3.S3ListBucketRequest; -import com.cloud.bridge.service.core.s3.S3ListBucketResponse; -import com.cloud.bridge.service.core.s3.S3MultipartUpload; -import com.cloud.bridge.service.core.s3.S3PolicyContext; -import com.cloud.bridge.service.core.s3.S3PutObjectRequest; -import com.cloud.bridge.service.core.s3.S3Response; -import com.cloud.bridge.service.core.s3.S3SetBucketAccessControlPolicyRequest; -import com.cloud.bridge.service.core.s3.S3BucketPolicy.PolicyAccess; -import com.cloud.bridge.service.core.s3.S3PolicyAction.PolicyActions; -import com.cloud.bridge.service.core.s3.S3PolicyCondition.ConditionKeys; -import com.cloud.bridge.service.exception.InvalidRequestContentException; -import com.cloud.bridge.service.exception.NetworkIOException; -import com.cloud.bridge.service.exception.PermissionDeniedException; -import com.cloud.bridge.util.Converter; -import com.cloud.bridge.util.PolicyParser; -import com.cloud.bridge.util.StringHelper; -import com.cloud.bridge.util.Tuple; -import com.cloud.bridge.util.XSerializer; -import 
com.cloud.bridge.util.XSerializerXmlAdapter; - - -/** - * @author Kelven Yang - */ -public class S3BucketAction implements ServletAction { - protected final static Logger logger = Logger.getLogger(S3BucketAction.class); - - private DocumentBuilderFactory dbf = null; - private OMFactory factory = OMAbstractFactory.getOMFactory(); - private XMLOutputFactory xmlOutFactory = XMLOutputFactory.newInstance(); - - public S3BucketAction() { - dbf = DocumentBuilderFactory.newInstance(); - dbf.setNamespaceAware( true ); - - } - - public void execute(HttpServletRequest request, HttpServletResponse response) - throws IOException, XMLStreamException - { - String method = request.getMethod(); - String queryString = request.getQueryString(); - - if ( method.equalsIgnoreCase("PUT")) - { - if ( queryString != null && queryString.length() > 0 ) - { - if ( queryString.startsWith("acl")) { - executePutBucketAcl(request, response); - return; - } - else if (queryString.startsWith("versioning")) { - executePutBucketVersioning(request, response); - return; - } - else if (queryString.startsWith("policy")) { - executePutBucketPolicy(request, response); - return; - } - else if (queryString.startsWith("logging")) { - executePutBucketLogging(request, response); - return; - } - else if (queryString.startsWith("website")) { - executePutBucketWebsite(request, response); - return; - } - } - executePutBucket(request, response); - } - else if(method.equalsIgnoreCase("GET")) - { - if (queryString != null && queryString.length() > 0) - { - if ( queryString.startsWith("acl")) { - executeGetBucketAcl(request, response); - return; - } - else if (queryString.startsWith("versioning")) { - executeGetBucketVersioning(request, response); - return; - } - else if (queryString.contains("versions")) { - executeGetBucketObjectVersions(request, response); - return; - } - else if (queryString.startsWith("location")) { - executeGetBucketLocation(request, response); - return; - } - else if 
(queryString.startsWith("uploads")) { - executeListMultipartUploads(request, response); - return; - } - else if (queryString.startsWith("policy")) { - executeGetBucketPolicy(request, response); - return; - } - else if (queryString.startsWith("logging")) { - executeGetBucketLogging(request, response); - return; - } - else if (queryString.startsWith("website")) { - executeGetBucketWebsite(request, response); - return; - } - } - - String bucketAtr = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - if ( bucketAtr.equals( "/" )) - executeGetAllBuckets(request, response); - else executeGetBucket(request, response); - } - else if (method.equalsIgnoreCase("DELETE")) - { - if (queryString != null && queryString.length() > 0) - { - if ( queryString.startsWith("policy")) { - executeDeleteBucketPolicy(request, response); - return; - } - else if (queryString.startsWith("website")) { - executeDeleteBucketWebsite(request, response); - return; - } - - } - executeDeleteBucket(request, response); - } - else throw new IllegalArgumentException("Unsupported method in REST request"); - } - - /** - * In order to support a policy on the "s3:CreateBucket" action we must be able to set and get - * policies before a bucket is actually created. - * - * @param request - * @param response - * @throws IOException - */ - private void executePutBucketPolicy(HttpServletRequest request, HttpServletResponse response) throws IOException - { - String bucketName = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - String policy = streamToString( request.getInputStream()); - - // [A] Is there an owner of an existing policy or bucket? 
- BucketPolicyDao policyDao = new BucketPolicyDao(); - SBucketDao bucketDao = new SBucketDao(); - SBucket bucket = bucketDao.getByName( bucketName ); - String owner = null; - - if ( null != bucket ) - { - owner = bucket.getOwnerCanonicalId(); - } - else - { try { - owner = policyDao.getPolicyOwner( bucketName ); - } - catch( Exception e ) {} - } - - - // [B] "The bucket owner by default has permissions to attach bucket policies to their buckets using PUT Bucket policy." - // -> the bucket owner may want to restrict the IP address from where this can be executed - String client = UserContext.current().getCanonicalUserId(); - S3PolicyContext context = new S3PolicyContext( PolicyActions.PutBucketPolicy, bucketName ); - switch( S3Engine.verifyPolicy( context )) { - case ALLOW: - break; - - case DEFAULT_DENY: - if (null != owner && !client.equals( owner )) { - response.setStatus(405); - return; - } - break; - - case DENY: - response.setStatus(403); - return; - } - - - // [B] Place the policy into the database over writting an existing policy - try { - // -> first make sure that the policy is valid by parsing it - PolicyParser parser = new PolicyParser(); - S3BucketPolicy sbp = parser.parse( policy, bucketName ); - - policyDao.deletePolicy( bucketName ); - if (null != policy && !policy.isEmpty()) policyDao.addPolicy( bucketName, client, policy ); - - if (null != sbp) ServiceProvider.getInstance().setBucketPolicy( bucketName, sbp ); - response.setStatus(200); - } - catch( PermissionDeniedException e ) { - logger.error("Put Bucket Policy failed due to " + e.getMessage(), e); - throw e; - } - catch( ParseException e ) { - logger.error("Put Bucket Policy failed due to " + e.getMessage(), e); - throw new PermissionDeniedException( e.toString()); - } - catch( Exception e ) { - logger.error("Put Bucket Policy failed due to " + e.getMessage(), e); - response.setStatus(500); - } - } - - private void executeGetBucketPolicy(HttpServletRequest request, HttpServletResponse response) 
- { - String bucketName = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - - // [A] Is there an owner of an existing policy or bucket? - BucketPolicyDao policyDao = new BucketPolicyDao(); - SBucketDao bucketDao = new SBucketDao(); - SBucket bucket = bucketDao.getByName( bucketName ); - String owner = null; - - if ( null != bucket ) - { - owner = bucket.getOwnerCanonicalId(); - } - else - { try { - owner = policyDao.getPolicyOwner( bucketName ); - } - catch( Exception e ) {} - } - - - // [B] "The bucket owner by default has permissions to retrieve bucket policies using GET Bucket policy." - // -> the bucket owner may want to restrict the IP address from where this can be executed - String client = UserContext.current().getCanonicalUserId(); - S3PolicyContext context = new S3PolicyContext( PolicyActions.GetBucketPolicy, bucketName ); - switch( S3Engine.verifyPolicy( context )) { - case ALLOW: - break; - - case DEFAULT_DENY: - if (null != owner && !client.equals( owner )) { - response.setStatus(405); - return; - } - break; - - case DENY: - response.setStatus(403); - return; - } - - - // [B] Pull the policy from the database if one exists - try { - String policy = policyDao.getPolicy( bucketName ); - if ( null == policy ) { - response.setStatus(404); - } - else { - response.setStatus(200); - response.setContentType("application/json"); - S3RestServlet.endResponse(response, policy); - } - } - catch( Exception e ) { - logger.error("Get Bucket Policy failed due to " + e.getMessage(), e); - response.setStatus(500); - } - } - - private void executeDeleteBucketPolicy(HttpServletRequest request, HttpServletResponse response) - { - String bucketName = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - - SBucketDao bucketDao = new SBucketDao(); - SBucket bucket = bucketDao.getByName( bucketName ); - if (bucket != null) - { - String client = UserContext.current().getCanonicalUserId(); - if (!client.equals( bucket.getOwnerCanonicalId())) { - 
response.setStatus(405); - return; - } - } - - try { - BucketPolicyDao policyDao = new BucketPolicyDao(); - String policy = policyDao.getPolicy( bucketName ); - if ( null == policy ) { - response.setStatus(204); - } - else { - ServiceProvider.getInstance().deleteBucketPolicy( bucketName ); - policyDao.deletePolicy( bucketName ); - response.setStatus(200); - } - } - catch( Exception e ) { - logger.error("Delete Bucket Policy failed due to " + e.getMessage(), e); - response.setStatus(500); - } - } - - public void executeGetAllBuckets(HttpServletRequest request, HttpServletResponse response) - throws IOException, XMLStreamException - { - Calendar cal = Calendar.getInstance(); - cal.set( 1970, 1, 1 ); - S3ListAllMyBucketsRequest engineRequest = new S3ListAllMyBucketsRequest(); - engineRequest.setAccessKey(UserContext.current().getAccessKey()); - engineRequest.setRequestTimestamp( cal ); - engineRequest.setSignature( "" ); - - S3ListAllMyBucketsResponse engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest); - - // -> serialize using the apache's Axiom classes - ListAllMyBucketsResponse allBuckets = S3SoapServiceImpl.toListAllMyBucketsResponse( engineResponse ); - - OutputStream os = response.getOutputStream(); - response.setStatus(200); - response.setContentType("text/xml; charset=UTF-8"); - XMLStreamWriter xmlWriter = xmlOutFactory.createXMLStreamWriter( os ); - String documentStart = new String( "" ); - os.write( documentStart.getBytes()); - MTOMAwareXMLSerializer MTOMWriter = new MTOMAwareXMLSerializer( xmlWriter ); - allBuckets.serialize( new QName( "http://s3.amazonaws.com/doc/2006-03-01/", "ListAllMyBucketsResponse", "ns1" ), factory, MTOMWriter ); - xmlWriter.flush(); - xmlWriter.close(); - os.close(); - } - - public void executeGetBucket(HttpServletRequest request, HttpServletResponse response) - throws IOException, XMLStreamException - { - S3ListBucketRequest engineRequest = new S3ListBucketRequest(); - 
engineRequest.setBucketName((String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY)); - engineRequest.setDelimiter(request.getParameter("delimiter")); - engineRequest.setMarker(request.getParameter("marker")); - engineRequest.setPrefix(request.getParameter("prefix")); - - int maxKeys = Converter.toInt(request.getParameter("max-keys"), 1000); - engineRequest.setMaxKeys(maxKeys); - S3ListBucketResponse engineResponse = ServiceProvider.getInstance().getS3Engine().listBucketContents( engineRequest, false ); - - // -> serialize using the apache's Axiom classes - ListBucketResponse oneBucket = S3SoapServiceImpl.toListBucketResponse( engineResponse ); - - OutputStream os = response.getOutputStream(); - response.setStatus(200); - response.setContentType("text/xml; charset=UTF-8"); - XMLStreamWriter xmlWriter = xmlOutFactory.createXMLStreamWriter( os ); - String documentStart = new String( "" ); - os.write( documentStart.getBytes()); - MTOMAwareXMLSerializer MTOMWriter = new MTOMAwareXMLSerializer( xmlWriter ); - oneBucket.serialize( new QName( "http://s3.amazonaws.com/doc/2006-03-01/", "ListBucketResponse", "ns1" ), factory, MTOMWriter ); - xmlWriter.flush(); - xmlWriter.close(); - os.close(); - } - - public void executeGetBucketAcl(HttpServletRequest request, HttpServletResponse response) - throws IOException, XMLStreamException - { - S3GetBucketAccessControlPolicyRequest engineRequest = new S3GetBucketAccessControlPolicyRequest(); - Calendar cal = Calendar.getInstance(); - cal.set( 1970, 1, 1 ); - engineRequest.setAccessKey(UserContext.current().getAccessKey()); - engineRequest.setRequestTimestamp( cal ); - engineRequest.setSignature( "" ); - engineRequest.setBucketName((String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY)); - - S3AccessControlPolicy engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest); - - // -> serialize using the apache's Axiom classes - GetBucketAccessControlPolicyResponse onePolicy = 
S3SoapServiceImpl.toGetBucketAccessControlPolicyResponse( engineResponse ); - - OutputStream os = response.getOutputStream(); - response.setStatus(200); - response.setContentType("text/xml; charset=UTF-8"); - XMLStreamWriter xmlWriter = xmlOutFactory.createXMLStreamWriter( os ); - String documentStart = new String( "" ); - os.write( documentStart.getBytes()); - MTOMAwareXMLSerializer MTOMWriter = new MTOMAwareXMLSerializer( xmlWriter ); - onePolicy.serialize( new QName( "http://s3.amazonaws.com/doc/2006-03-01/", "GetBucketAccessControlPolicyResponse", "ns1" ), factory, MTOMWriter ); - xmlWriter.flush(); - xmlWriter.close(); - os.close(); - } - - public void executeGetBucketVersioning(HttpServletRequest request, HttpServletResponse response) throws IOException - { - // [A] Does the bucket exist? - String bucketName = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - String versioningStatus = null; - - if (null == bucketName) { - logger.error( "executeGetBucketVersioning - no bucket name given" ); - response.setStatus( 400 ); - return; - } - - SBucketDao bucketDao = new SBucketDao(); - SBucket sbucket = bucketDao.getByName( bucketName ); - if (sbucket == null) { - response.setStatus( 404 ); - return; - } - - // [B] The owner may want to restrict the IP address at which this can be performed - String client = UserContext.current().getCanonicalUserId(); - if (!client.equals( sbucket.getOwnerCanonicalId())) - throw new PermissionDeniedException( "Access Denied - only the owner can read bucket versioning" ); - - S3PolicyContext context = new S3PolicyContext( PolicyActions.GetBucketVersioning, bucketName ); - if (PolicyAccess.DENY == S3Engine.verifyPolicy( context )) { - response.setStatus(403); - return; - } - - - // [C] - switch( sbucket.getVersioningStatus()) { - default: - case 0: versioningStatus = ""; break; - case 1: versioningStatus = "Enabled"; break; - case 2: versioningStatus = "Suspended"; break; - } - - StringBuffer xml = new StringBuffer(); - 
xml.append( "" ); - xml.append( "" ); - if (0 < versioningStatus.length()) xml.append( "" ).append( versioningStatus ).append( "" ); - xml.append( "" ); - - response.setStatus(200); - response.setContentType("text/xml; charset=UTF-8"); - S3RestServlet.endResponse(response, xml.toString()); - } - - public void executeGetBucketObjectVersions(HttpServletRequest request, HttpServletResponse response) throws IOException - { - S3ListBucketRequest engineRequest = new S3ListBucketRequest(); - String keyMarker = request.getParameter("key-marker"); - String versionIdMarker = request.getParameter("version-id-marker"); - - engineRequest.setBucketName((String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY)); - engineRequest.setDelimiter(request.getParameter("delimiter")); - engineRequest.setMarker( keyMarker ); - engineRequest.setPrefix(request.getParameter("prefix")); - engineRequest.setVersionIdMarker( versionIdMarker ); - - int maxKeys = Converter.toInt(request.getParameter("max-keys"), 1000); - engineRequest.setMaxKeys(maxKeys); - S3ListBucketResponse engineResponse = ServiceProvider.getInstance().getS3Engine().listBucketContents( engineRequest, true ); - - // -> the SOAP version produces different XML - StringBuffer xml = new StringBuffer(); - xml.append( "" ); - xml.append( "" ); - xml.append( "" ).append( engineResponse.getBucketName()).append( "" ); - - if ( null == keyMarker ) - xml.append( "" ); - else xml.append( "" ).append( keyMarker ).append( "" ); - else xml.append( "" ).append( keyMarker ).append( "" ).append( engineResponse.getMaxKeys()).append( "" ); - xml.append( "" ).append( engineResponse.isTruncated()).append( "" ); - - S3ListBucketObjectEntry[] versions = engineResponse.getContents(); - for( int i=0; null != versions && i < versions.length; i++ ) - { - S3CanonicalUser owner = versions[i].getOwner(); - boolean isDeletionMarker = versions[i].getIsDeletionMarker(); - String displayName = owner.getDisplayName(); - String id = owner.getID(); - - if ( 
isDeletionMarker ) - { - xml.append( "" ); - xml.append( "" ).append( versions[i].getKey()).append( "" ); - xml.append( "" ).append( versions[i].getVersion()).append( "" ); - xml.append( "" ).append( versions[i].getIsLatest()).append( "" ); - xml.append( "" ).append( DatatypeConverter.printDateTime( versions[i].getLastModified())).append( "" ); - } - else - { xml.append( "" ); - xml.append( "" ).append( versions[i].getKey()).append( "" ); - xml.append( "" ).append( versions[i].getVersion()).append( "" ); - xml.append( "" ).append( versions[i].getIsLatest()).append( "" ); - xml.append( "" ).append( DatatypeConverter.printDateTime( versions[i].getLastModified())).append( "" ); - xml.append( "" ).append( versions[i].getETag()).append( "" ); - xml.append( "" ).append( versions[i].getSize()).append( "" ); - xml.append( "" ).append( versions[i].getStorageClass()).append( "" ); - } - - xml.append( "" ); - xml.append( "" ).append( id ).append( "" ); - if ( null == displayName ) - xml.append( "" ); - else xml.append( "" ).append( owner.getDisplayName()).append( "" ); - xml.append( "" ); - - if ( isDeletionMarker ) - xml.append( "" ); - else xml.append( "" ); - } - xml.append( "" ); - - response.setStatus(200); - response.setContentType("text/xml; charset=UTF-8"); - S3RestServlet.endResponse(response, xml.toString()); - } - - public void executeGetBucketLogging(HttpServletRequest request, HttpServletResponse response) throws IOException { - // TODO -- this is a beta feature of S3 - response.setStatus(501); - } - - public void executeGetBucketLocation(HttpServletRequest request, HttpServletResponse response) throws IOException { - response.setStatus(501); - } - - public void executeGetBucketWebsite(HttpServletRequest request, HttpServletResponse response) throws IOException { - response.setStatus(501); - } - - public void executeDeleteBucketWebsite(HttpServletRequest request, HttpServletResponse response) throws IOException { - response.setStatus(501); - } - - public void 
executePutBucket(HttpServletRequest request, HttpServletResponse response) throws IOException - { - int contentLength = request.getContentLength(); - Object objectInContent = null; - - if(contentLength > 0) - { - InputStream is = null; - try { - is = request.getInputStream(); - String xml = StringHelper.stringFromStream(is); - XSerializer serializer = new XSerializer(new XSerializerXmlAdapter()); - objectInContent = serializer.serializeFrom(xml); - if(objectInContent != null && !(objectInContent instanceof S3CreateBucketConfiguration)) { - throw new InvalidRequestContentException("Invalid rquest content in create-bucket: " + xml); - } - is.close(); - - } catch (IOException e) { - logger.error("Unable to read request data due to " + e.getMessage(), e); - throw new NetworkIOException(e); - - } finally { - if(is != null) is.close(); - } - } - - S3CreateBucketRequest engineRequest = new S3CreateBucketRequest(); - engineRequest.setBucketName((String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY)); - engineRequest.setConfig((S3CreateBucketConfiguration)objectInContent); - - S3CreateBucketResponse engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest); - response.addHeader("Location", "/" + engineResponse.getBucketName()); - response.setContentLength(0); - response.setStatus(200); - response.flushBuffer(); - } - - public void executePutBucketAcl(HttpServletRequest request, HttpServletResponse response) throws IOException - { - S3PutObjectRequest putRequest = null; - - // -> reuse the Access Control List parsing code that was added to support DIME - String bucketName = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - try { - putRequest = S3RestServlet.toEnginePutObjectRequest( request.getInputStream()); - } - catch( Exception e ) { - throw new IOException( e.toString()); - } - - // -> reuse the SOAP code to save the passed in ACLs - S3SetBucketAccessControlPolicyRequest engineRequest = new 
S3SetBucketAccessControlPolicyRequest(); - engineRequest.setBucketName( bucketName ); - engineRequest.setAcl( putRequest.getAcl()); - - S3Response engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest); - response.setStatus( engineResponse.getResultCode()); - } - - public void executePutBucketVersioning(HttpServletRequest request, HttpServletResponse response) throws IOException - { - String bucketName = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - String versioningStatus = null; - Node item = null; - - if (null == bucketName) { - logger.error( "executePutBucketVersioning - no bucket name given" ); - response.setStatus( 400 ); - return; - } - - // -> is the XML as defined? - try { - DocumentBuilder db = dbf.newDocumentBuilder(); - Document restXML = db.parse( request.getInputStream()); - NodeList match = S3RestServlet.getElement( restXML, "http://s3.amazonaws.com/doc/2006-03-01/", "Status" ); - if ( 0 < match.getLength()) - { - item = match.item(0); - versioningStatus = new String( item.getFirstChild().getNodeValue()); - } - else - { logger.error( "executePutBucketVersioning - cannot find Status tag in XML body" ); - response.setStatus( 400 ); - return; - } - } - catch( Exception e ) { - logger.error( "executePutBucketVersioning - failed to parse XML due to " + e.getMessage(), e); - response.setStatus(400); - return; - } - - try { - // -> does not matter what the ACLs say only the owner can turn on versioning on a bucket - // -> the bucket owner may want to restrict the IP address from which this can occur - SBucketDao bucketDao = new SBucketDao(); - SBucket sbucket = bucketDao.getByName( bucketName ); - - String client = UserContext.current().getCanonicalUserId(); - if (!client.equals( sbucket.getOwnerCanonicalId())) - throw new PermissionDeniedException( "Access Denied - only the owner can turn on versioing on a bucket" ); - - S3PolicyContext context = new S3PolicyContext( PolicyActions.PutBucketVersioning, 
bucketName ); - if (PolicyAccess.DENY == S3Engine.verifyPolicy( context )) { - response.setStatus(403); - return; - } - - - if (versioningStatus.equalsIgnoreCase( "Enabled" )) sbucket.setVersioningStatus( 1 ); - else if (versioningStatus.equalsIgnoreCase( "Suspended")) sbucket.setVersioningStatus( 2 ); - else { - logger.error( "executePutBucketVersioning - unknown state: [" + versioningStatus + "]" ); - response.setStatus( 400 ); - return; - } - bucketDao.update( sbucket ); - - } catch( PermissionDeniedException e ) { - logger.error( "executePutBucketVersioning - failed due to " + e.getMessage(), e); - throw e; - - } catch( Exception e ) { - logger.error( "executePutBucketVersioning - failed due to " + e.getMessage(), e); - response.setStatus(500); - return; - } - response.setStatus(200); - } - - public void executePutBucketLogging(HttpServletRequest request, HttpServletResponse response) throws IOException { - // TODO -- this is a S3 beta feature - response.setStatus(501); - } - - public void executePutBucketWebsite(HttpServletRequest request, HttpServletResponse response) throws IOException { - response.setStatus(501); - } - - public void executeDeleteBucket(HttpServletRequest request, HttpServletResponse response) throws IOException - { - S3DeleteBucketRequest engineRequest = new S3DeleteBucketRequest(); - engineRequest.setBucketName((String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY)); - S3Response engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest); - response.setStatus(engineResponse.getResultCode()); - response.flushBuffer(); - } - - /** - * This is a very complex function with all the options defined by Amazon. Part of the functionality is - * provided by the query done against the database. The CommonPrefixes functionality is done the same way - * as done in the listBucketContents function (i.e., by iterating though the list to decide which output - * element each key is placed). 
- * - * @param request - * @param response - * @throws IOException - */ - public void executeListMultipartUploads(HttpServletRequest request, HttpServletResponse response) throws IOException - { - // [A] Obtain parameters and do basic bucket verification - String bucketName = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - String delimiter = request.getParameter("delimiter"); - String keyMarker = request.getParameter("key-marker"); - String prefix = request.getParameter("prefix"); - int maxUploads = 1000; - int nextUploadId = 0; - String nextKey = null; - boolean isTruncated = false; - S3MultipartUpload[] uploads = null; - S3MultipartUpload onePart = null; - - String temp = request.getParameter("max-uploads"); - if (null != temp) { - maxUploads = Integer.parseInt( temp ); - if (maxUploads > 1000 || maxUploads < 0) maxUploads = 1000; - } - - // -> upload-id-marker is ignored unless key-marker is also specified - String uploadIdMarker = request.getParameter("upload-id-marker"); - if (null == keyMarker) uploadIdMarker = null; - - // -> does the bucket exist, we may need it to verify access permissions - SBucketDao bucketDao = new SBucketDao(); - SBucket bucket = bucketDao.getByName(bucketName); - if (bucket == null) { - logger.error( "listMultipartUpload failed since " + bucketName + " does not exist" ); - response.setStatus(404); - return; - } - - S3PolicyContext context = new S3PolicyContext( PolicyActions.ListBucketMultipartUploads, bucketName ); - context.setEvalParam( ConditionKeys.Prefix, prefix ); - context.setEvalParam( ConditionKeys.Delimiter, delimiter ); - S3Engine.verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_READ ); - - - // [B] Query the multipart table to get the list of current uploads - try { - MultipartLoadDao uploadDao = new MultipartLoadDao(); - Tuple result = uploadDao.getInitiatedUploads( bucketName, maxUploads, prefix, keyMarker, uploadIdMarker ); - uploads = result.getFirst(); - isTruncated = 
result.getSecond().booleanValue(); - } - catch( Exception e ) { - logger.error("List Multipart Uploads failed due to " + e.getMessage(), e); - response.setStatus(500); - } - - StringBuffer xml = new StringBuffer(); - xml.append( "" ); - xml.append( "" ); - xml.append( "" ).append( bucketName ).append( "" ); - xml.append( "").append((null == keyMarker ? "" : keyMarker)).append( "" ); - xml.append( "").append((null == uploadIdMarker ? "" : uploadIdMarker)).append( "" ); - - - // [C] Construct the contents of the element - StringBuffer partsList = new StringBuffer(); - for( int i=0; i < uploads.length; i++ ) - { - onePart = uploads[i]; - if (null == onePart) break; - - if (delimiter != null && !delimiter.isEmpty()) - { - // -> is this available only in the CommonPrefixes element? - if (StringHelper.substringInBetween(onePart.getKey(), prefix, delimiter) != null) - continue; - } - - nextKey = onePart.getKey(); - nextUploadId = onePart.getId(); - partsList.append( "" ); - partsList.append( "" ).append( nextKey ).append( "" ); - partsList.append( "" ).append( nextUploadId ).append( "" ); - partsList.append( "" ); - partsList.append( "" ).append( onePart.getAccessKey()).append( "" ); - partsList.append( "" ); - partsList.append( "" ); - partsList.append( "" ); - partsList.append( "" ).append( onePart.getAccessKey()).append( "" ); - partsList.append( "" ); - partsList.append( "" ); - partsList.append( "STANDARD" ); - partsList.append( "" ).append( DatatypeConverter.printDateTime( onePart.getLastModified())).append( "" ); - partsList.append( "" ); - } - - // [D] Construct the contents of the elements (if any) - for( int i=0; i < uploads.length; i++ ) - { - onePart = uploads[i]; - if (null == onePart) break; - - if (delimiter != null && !delimiter.isEmpty()) - { - String subName = StringHelper.substringInBetween(onePart.getKey(), prefix, delimiter); - if (subName != null) - { - partsList.append( "" ); - partsList.append( "" ); - if ( prefix != null && prefix.length() > 0 ) - 
partsList.append( prefix + delimiter + subName ); - else partsList.append( subName ); - partsList.append( "" ); - partsList.append( "" ); - } - } - } - - // [D] Finish off the response - xml.append( "" ).append((null == nextKey ? "" : nextKey)).append( "" ); - xml.append( "" ).append((0 == nextUploadId ? "" : nextUploadId)).append( "" ); - xml.append( "" ).append( maxUploads ).append( "" ); - xml.append( "" ).append( isTruncated ).append( "" ); - - xml.append( partsList.toString()); - xml.append( "" ); - - response.setStatus(200); - response.setContentType("text/xml; charset=UTF-8"); - S3RestServlet.endResponse(response, xml.toString()); - } - - private String streamToString( InputStream is ) throws IOException - { - int n = 0; - - if ( null != is ) - { - Writer writer = new StringWriter(); - char[] buffer = new char[1024]; - try { - Reader reader = new BufferedReader( new InputStreamReader(is, "UTF-8")); - while ((n = reader.read(buffer)) != -1) writer.write(buffer, 0, n); - } - finally { - is.close(); - } - return writer.toString(); - } - else return null; - } -} -======= /* * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. * @@ -1858,5 +942,4 @@ public class S3BucketAction implements ServletAction { } else return null; } -} ->>>>>>> 6472e7b... Now really adding the renamed files! +} \ No newline at end of file diff --git a/awsapi/src/com/cloud/bridge/service/controller/s3/S3ObjectAction.java b/awsapi/src/com/cloud/bridge/service/controller/s3/S3ObjectAction.java index 148ed57968a..778ebd0bcea 100644 --- a/awsapi/src/com/cloud/bridge/service/controller/s3/S3ObjectAction.java +++ b/awsapi/src/com/cloud/bridge/service/controller/s3/S3ObjectAction.java @@ -1,1502 +1,1258 @@ -/* - * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.cloud.bridge.service.controller.s3; - -import java.io.BufferedReader; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.OutputStream; -import java.util.ArrayList; -import java.util.Date; -import java.util.Enumeration; -import java.util.List; -import java.util.UUID; - -import javax.activation.DataHandler; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; -import javax.xml.bind.DatatypeConverter; -<<<<<<< HEAD -import javax.xml.namespace.QName; -import javax.xml.parsers.DocumentBuilder; -import javax.xml.parsers.DocumentBuilderFactory; -import javax.xml.parsers.ParserConfigurationException; -import javax.xml.stream.XMLOutputFactory; -import javax.xml.stream.XMLStreamException; -import javax.xml.stream.XMLStreamWriter; - -import org.apache.axiom.om.OMAbstractFactory; -import org.apache.axiom.om.OMFactory; -import org.apache.axis2.databinding.utils.writer.MTOMAwareXMLSerializer; -======= -import javax.xml.parsers.DocumentBuilder; -import javax.xml.parsers.DocumentBuilderFactory; -import javax.xml.parsers.ParserConfigurationException; -import javax.xml.stream.XMLStreamException; - ->>>>>>> 6472e7b... Now really adding the renamed files! 
-import org.apache.log4j.Logger; -import org.w3c.dom.Document; -import org.w3c.dom.Node; -import org.w3c.dom.NodeList; - -import com.amazon.s3.CopyObjectResponse; -import com.amazon.s3.GetObjectAccessControlPolicyResponse; -<<<<<<< HEAD -======= -import com.cloud.bridge.io.MTOMAwareResultStreamWriter; ->>>>>>> 6472e7b... Now really adding the renamed files! -import com.cloud.bridge.model.SAcl; -import com.cloud.bridge.model.SBucket; -import com.cloud.bridge.persist.dao.MultipartLoadDao; -import com.cloud.bridge.persist.dao.SBucketDao; -import com.cloud.bridge.service.S3Constants; -import com.cloud.bridge.service.S3RestServlet; -<<<<<<< HEAD -import com.cloud.bridge.service.S3SoapServiceImpl; -import com.cloud.bridge.service.ServiceProvider; -import com.cloud.bridge.service.ServletAction; -import com.cloud.bridge.service.UserContext; -======= -import com.cloud.bridge.service.UserContext; -import com.cloud.bridge.service.core.s3.S3AccessControlList; ->>>>>>> 6472e7b... Now really adding the renamed files! -import com.cloud.bridge.service.core.s3.S3AccessControlPolicy; -import com.cloud.bridge.service.core.s3.S3AuthParams; -import com.cloud.bridge.service.core.s3.S3ConditionalHeaders; -import com.cloud.bridge.service.core.s3.S3CopyObjectRequest; -import com.cloud.bridge.service.core.s3.S3CopyObjectResponse; -import com.cloud.bridge.service.core.s3.S3DeleteObjectRequest; -import com.cloud.bridge.service.core.s3.S3Engine; -import com.cloud.bridge.service.core.s3.S3GetObjectAccessControlPolicyRequest; -import com.cloud.bridge.service.core.s3.S3GetObjectRequest; -import com.cloud.bridge.service.core.s3.S3GetObjectResponse; -<<<<<<< HEAD -======= -import com.cloud.bridge.service.core.s3.S3Grant; ->>>>>>> 6472e7b... Now really adding the renamed files! 
-import com.cloud.bridge.service.core.s3.S3MetaDataEntry; -import com.cloud.bridge.service.core.s3.S3MultipartPart; -import com.cloud.bridge.service.core.s3.S3PolicyContext; -import com.cloud.bridge.service.core.s3.S3PutObjectInlineRequest; -import com.cloud.bridge.service.core.s3.S3PutObjectInlineResponse; -import com.cloud.bridge.service.core.s3.S3PutObjectRequest; -import com.cloud.bridge.service.core.s3.S3Response; -<<<<<<< HEAD -======= -import com.cloud.bridge.service.core.s3.S3SetBucketAccessControlPolicyRequest; ->>>>>>> 6472e7b... Now really adding the renamed files! -import com.cloud.bridge.service.core.s3.S3SetObjectAccessControlPolicyRequest; -import com.cloud.bridge.service.core.s3.S3PolicyAction.PolicyActions; -import com.cloud.bridge.service.exception.PermissionDeniedException; -import com.cloud.bridge.util.Converter; -import com.cloud.bridge.util.DateHelper; -import com.cloud.bridge.util.HeaderParam; -import com.cloud.bridge.util.ServletRequestDataSource; -<<<<<<< HEAD -import com.cloud.bridge.util.Tuple; - -/** - * @author Kelven Yang -======= -import com.cloud.bridge.util.OrderedPair; - -/** - * @author Kelven Yang, John Zucker ->>>>>>> 6472e7b... Now really adding the renamed files! - */ -public class S3ObjectAction implements ServletAction { - protected final static Logger logger = Logger.getLogger(S3ObjectAction.class); - - private DocumentBuilderFactory dbf = null; -<<<<<<< HEAD - private OMFactory factory = OMAbstractFactory.getOMFactory(); - private XMLOutputFactory xmlOutFactory = XMLOutputFactory.newInstance(); -======= ->>>>>>> 6472e7b... Now really adding the renamed files! 
- - public S3ObjectAction() { - dbf = DocumentBuilderFactory.newInstance(); - dbf.setNamespaceAware( true ); - - } - - public void execute(HttpServletRequest request, HttpServletResponse response) - throws IOException, XMLStreamException - { - String method = request.getMethod(); -<<<<<<< HEAD - String queryString = request.getQueryString(); -======= - String queryString = request.getQueryString(); ->>>>>>> 6472e7b... Now really adding the renamed files! - String copy = null; - - response.addHeader( "x-amz-request-id", UUID.randomUUID().toString()); - - if ( method.equalsIgnoreCase( "GET" )) - { - if ( queryString != null && queryString.length() > 0 ) - { - if (queryString.contains("acl")) executeGetObjectAcl(request, response); - else if (queryString.contains("uploadId")) executeListUploadParts(request, response); - else executeGetObject(request, response); - } - else executeGetObject(request, response); - } - else if (method.equalsIgnoreCase( "PUT" )) - { - if ( queryString != null && queryString.length() > 0 ) - { - if (queryString.contains("acl")) executePutObjectAcl(request, response); - else if (queryString.contains("partNumber")) executeUploadPart(request, response); - else executePutObject(request, response); - } - else if ( null != (copy = request.getHeader( "x-amz-copy-source" ))) -<<<<<<< HEAD - { -======= - { ->>>>>>> 6472e7b... Now really adding the renamed files! 
- executeCopyObject(request, response, copy.trim()); - } - else executePutObject(request, response); - } - else if (method.equalsIgnoreCase( "DELETE" )) - { - if ( queryString != null && queryString.length() > 0 ) - { - if (queryString.contains("uploadId")) executeAbortMultipartUpload(request, response); - else executeDeleteObject(request, response); - } - else executeDeleteObject(request, response); - } -<<<<<<< HEAD - else if (method.equalsIgnoreCase( "HEAD" )) - { - executeHeadObject(request, response); -======= - else if (method.equalsIgnoreCase( "HEAD" )) - { - executeHeadObject(request, response); ->>>>>>> 6472e7b... Now really adding the renamed files! - } - else if (method.equalsIgnoreCase( "POST" )) - { - if ( queryString != null && queryString.length() > 0 ) - { - if (queryString.contains("uploads")) executeInitiateMultipartUpload(request, response); - else if (queryString.contains("uploadId")) executeCompleteMultipartUpload(request, response); - } -<<<<<<< HEAD - else executePostObject(request, response); -======= - else if ( request.getAttribute(S3Constants.PLAIN_POST_ACCESS_KEY) !=null ) - executePlainPostObject (request, response); - // TODO - Having implemented the request, now provide an informative HTML page response - else - executePostObject(request, response); ->>>>>>> 6472e7b... Now really adding the renamed files! 
- } - else throw new IllegalArgumentException( "Unsupported method in REST request"); - } - - - private void executeCopyObject(HttpServletRequest request, HttpServletResponse response, String copy) - throws IOException, XMLStreamException - { - S3CopyObjectRequest engineRequest = new S3CopyObjectRequest(); - String versionId = null; - - String bucketName = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - String key = (String)request.getAttribute(S3Constants.OBJECT_ATTR_KEY); - String sourceBucketName = null; - String sourceKey = null; - - // [A] Parse the x-amz-copy-source header into usable pieces -<<<<<<< HEAD - // -> is there a ?versionId= value -======= - // Check to find a ?versionId= value if any ->>>>>>> 6472e7b... Now really adding the renamed files! - int index = copy.indexOf( '?' ); - if (-1 != index) - { - versionId = copy.substring( index+1 ); - if (versionId.startsWith( "versionId=" )) engineRequest.setVersion( versionId.substring( 10 )); - copy = copy.substring( 0, index ); - } - -<<<<<<< HEAD - // -> the value of copy should look like: "/bucket-name/object-name" - index = copy.indexOf( '/' ); - if ( 0 != index ) - throw new IllegalArgumentException( "Invalid x-amz-copy-sourse header value [" + copy + "]" ); - else copy = copy.substring( 1 ); - - index = copy.indexOf( '/' ); - if ( -1 == index ) - throw new IllegalArgumentException( "Invalid x-amz-copy-sourse header value [" + copy + "]" ); -======= - // The value of copy should look like: "bucket-name/object-name" - index = copy.indexOf( '/' ); - - // In case it looks like "/bucket-name/object-name" discard a leading '/' if it exists - if ( 0 == index ) - { - copy = copy.substring(1); - index = copy.indexOf( '/' ); - } - - if ( -1 == index ) - throw new IllegalArgumentException( "Invalid x-amz-copy-source header value [" + copy + "]" ); ->>>>>>> 6472e7b... Now really adding the renamed files! 
- - sourceBucketName = copy.substring( 0, index ); - sourceKey = copy.substring( index+1 ); - - - // [B] Set the object used in the SOAP request so it can do the bulk of the work for us - engineRequest.setSourceBucketName( sourceBucketName ); - engineRequest.setSourceKey( sourceKey ); - engineRequest.setDestinationBucketName( bucketName ); - engineRequest.setDestinationKey( key ); - - engineRequest.setDataDirective( request.getHeader( "x-amz-metadata-directive" )); - engineRequest.setMetaEntries( extractMetaData( request )); - engineRequest.setCannedAccess( request.getHeader( "x-amz-acl" )); - engineRequest.setConditions( conditionalRequest( request, true )); - - - // [C] Do the actual work and return the result - S3CopyObjectResponse engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest( engineRequest ); - - versionId = engineResponse.getCopyVersion(); - if (null != versionId) response.addHeader( "x-amz-copy-source-version-id", versionId ); - versionId = engineResponse.getPutVersion(); - if (null != versionId) response.addHeader( "x-amz-version-id", versionId ); - -<<<<<<< HEAD - // -> serialize using the apache's Axiom classes - CopyObjectResponse allBuckets = S3SoapServiceImpl.toCopyObjectResponse( engineResponse ); - - OutputStream os = response.getOutputStream(); - response.setStatus(200); - response.setContentType("text/xml; charset=UTF-8"); - XMLStreamWriter xmlWriter = xmlOutFactory.createXMLStreamWriter( os ); - String documentStart = new String( "" ); - os.write( documentStart.getBytes()); - MTOMAwareXMLSerializer MTOMWriter = new MTOMAwareXMLSerializer( xmlWriter ); - allBuckets.serialize( new QName( "http://s3.amazonaws.com/doc/2006-03-01/", "CopyObjectResponse", "ns1" ), factory, MTOMWriter ); - xmlWriter.flush(); - xmlWriter.close(); - os.close(); - } - - private void executeGetObjectAcl(HttpServletRequest request, HttpServletResponse response) throws IOException -======= - // To allow the copy object result to be serialized via 
Axiom classes - CopyObjectResponse allBuckets = S3SerializableServiceImplementation.toCopyObjectResponse( engineResponse ); - - OutputStream outputStream = response.getOutputStream(); - response.setStatus(200); - response.setContentType("application/xml"); - // The content-type literally should be "application/xml; charset=UTF-8" - // but any compliant JVM supplies utf-8 by default; - - MTOMAwareResultStreamWriter resultWriter = new MTOMAwareResultStreamWriter ("CopyObjectResult", outputStream ); - resultWriter.startWrite(); - resultWriter.writeout(allBuckets); - resultWriter.stopWrite(); - - } - - private void executeGetObjectAcl(HttpServletRequest request, HttpServletResponse response) throws IOException, XMLStreamException ->>>>>>> 6472e7b... Now really adding the renamed files! - { - String bucketName = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - String key = (String)request.getAttribute(S3Constants.OBJECT_ATTR_KEY); - - S3GetObjectAccessControlPolicyRequest engineRequest = new S3GetObjectAccessControlPolicyRequest(); - engineRequest.setBucketName( bucketName ); - engineRequest.setKey( key ); - - // -> is this a request for a specific version of the object? 
look for "versionId=" in the query string - String queryString = request.getQueryString(); - if (null != queryString) engineRequest.setVersion( returnParameter( queryString, "versionId=" )); - - S3AccessControlPolicy engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest); - int resultCode = engineResponse.getResultCode(); - if (200 != resultCode) { - response.setStatus( resultCode ); - return; - } - String version = engineResponse.getVersion(); - if (null != version) response.addHeader( "x-amz-version-id", version ); - - -<<<<<<< HEAD - // -> serialize using the apache's Axiom classes - GetObjectAccessControlPolicyResponse onePolicy = S3SoapServiceImpl.toGetObjectAccessControlPolicyResponse( engineResponse ); - - try { - OutputStream os = response.getOutputStream(); - response.setStatus( resultCode ); - response.setContentType("text/xml; charset=UTF-8"); - XMLStreamWriter xmlWriter = xmlOutFactory.createXMLStreamWriter( os ); - String documentStart = new String( "" ); - os.write( documentStart.getBytes()); - MTOMAwareXMLSerializer MTOMWriter = new MTOMAwareXMLSerializer( xmlWriter ); - onePolicy.serialize( new QName( "http://s3.amazonaws.com/doc/2006-03-01/", "GetObjectAccessControlPolicyResponse", "ns1" ), factory, MTOMWriter ); - xmlWriter.flush(); - xmlWriter.close(); - os.close(); - } - catch( XMLStreamException e ) { - throw new IOException( e.toString()); - } - } - - private void executePutObjectAcl(HttpServletRequest request, HttpServletResponse response) throws IOException - { - S3PutObjectRequest putRequest = null; - - // -> reuse the Access Control List parsing code that was added to support DIME - String bucketName = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - String key = (String)request.getAttribute(S3Constants.OBJECT_ATTR_KEY); - try { - putRequest = S3RestServlet.toEnginePutObjectRequest( request.getInputStream()); - } - catch( Exception e ) { - throw new IOException( e.toString()); - } - - // -> 
reuse the SOAP code to save the passed in ACLs - S3SetObjectAccessControlPolicyRequest engineRequest = new S3SetObjectAccessControlPolicyRequest(); - engineRequest.setBucketName( bucketName ); - engineRequest.setKey( key ); - engineRequest.setAcl( putRequest.getAcl()); - - // -> is this a request for a specific version of the object? look for "versionId=" in the query string - String queryString = request.getQueryString(); - if (null != queryString) engineRequest.setVersion( returnParameter( queryString, "versionId=" )); - - S3Response engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest); - String version = engineResponse.getVersion(); - if (null != version) response.addHeader( "x-amz-version-id", version ); - response.setStatus( engineResponse.getResultCode()); -======= - // To allow the get object acl policy result to be serialized via Axiom classes - GetObjectAccessControlPolicyResponse onePolicy = S3SerializableServiceImplementation.toGetObjectAccessControlPolicyResponse( engineResponse ); - - OutputStream outputStream = response.getOutputStream(); - response.setStatus(200); - response.setContentType("application/xml"); - // The content-type literally should be "application/xml; charset=UTF-8" - // but any compliant JVM supplies utf-8 by default; - - MTOMAwareResultStreamWriter resultWriter = new MTOMAwareResultStreamWriter ("GetObjectAccessControlPolicyResult", outputStream ); - resultWriter.startWrite(); - resultWriter.writeout(onePolicy); - resultWriter.stopWrite(); - } - - private void executePutObjectAcl(HttpServletRequest request, HttpServletResponse response) throws IOException - { - // [A] Determine that there is an applicable bucket which might have an ACL set - - String bucketName = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - String key = (String)request.getAttribute(S3Constants.OBJECT_ATTR_KEY); - - SBucketDao bucketDao = new SBucketDao(); - SBucket bucket = bucketDao.getByName( bucketName ); - 
String owner = null; - if ( null != bucket ) - owner = bucket.getOwnerCanonicalId(); - if (null == owner) - { - logger.error( "ACL update failed since " + bucketName + " does not exist" ); - throw new IOException("ACL update failed"); - } - if (null == key) - { - logger.error( "ACL update failed since " + bucketName + " does not contain the expected key" ); - throw new IOException("ACL update failed"); - } - - // [B] Obtain the grant request which applies to the acl request string. This latter is supplied as the value of the x-amz-acl header. - - S3SetObjectAccessControlPolicyRequest engineRequest = new S3SetObjectAccessControlPolicyRequest(); - S3Grant grantRequest = new S3Grant(); - S3AccessControlList aclRequest = new S3AccessControlList(); - - String aclRequestString = request.getHeader("x-amz-acl"); - OrderedPair accessControlsForObjectOwner = SAcl.getCannedAccessControls(aclRequestString,"SObject"); - grantRequest.setPermission(accessControlsForObjectOwner.getFirst()); - grantRequest.setGrantee(accessControlsForObjectOwner.getSecond()); - grantRequest.setCanonicalUserID(owner); - aclRequest.addGrant(grantRequest); - engineRequest.setAcl(aclRequest); - engineRequest.setBucketName(bucketName); - engineRequest.setKey(key); - - - // [C] Allow an S3Engine to handle the S3SetObjectAccessControlPolicyRequest - S3Response engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest); - response.setStatus( engineResponse.getResultCode()); - ->>>>>>> 6472e7b... Now really adding the renamed files! - } - - private void executeGetObject(HttpServletRequest request, HttpServletResponse response) throws IOException - { - String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY); -<<<<<<< HEAD - String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY); -======= - String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY); - ->>>>>>> 6472e7b... Now really adding the renamed files! 
- - S3GetObjectRequest engineRequest = new S3GetObjectRequest(); - engineRequest.setBucketName(bucket); - engineRequest.setKey(key); - engineRequest.setInlineData(true); - engineRequest.setReturnData(true); - //engineRequest.setReturnMetadata(true); - engineRequest = setRequestByteRange( request, engineRequest ); - - // -> is this a request for a specific version of the object? look for "versionId=" in the query string - String queryString = request.getQueryString(); - if (null != queryString) engineRequest.setVersion( returnParameter( queryString, "versionId=" )); - - S3GetObjectResponse engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest( engineRequest ); - response.setStatus( engineResponse.getResultCode()); - - String deleteMarker = engineResponse.getDeleteMarker(); - if ( null != deleteMarker ) { - response.addHeader( "x-amz-delete-marker", "true" ); - response.addHeader( "x-amz-version-id", deleteMarker ); - } - else { - String version = engineResponse.getVersion(); - if (null != version) response.addHeader( "x-amz-version-id", version ); - } - - // -> was the get conditional? 
- if (!conditionPassed( request, response, engineResponse.getLastModified().getTime(), engineResponse.getETag())) - return; - - - // -> is there data to return - // -> from the Amazon REST documentation it appears that Meta data is only returned as part of a HEAD request - //returnMetaData( engineResponse, response ); - - DataHandler dataHandler = engineResponse.getData(); - if (dataHandler != null) { - response.addHeader("ETag", "\"" + engineResponse.getETag() + "\""); - response.addHeader("Last-Modified", DateHelper.getDateDisplayString( - DateHelper.GMT_TIMEZONE, engineResponse.getLastModified().getTime(), "E, d MMM yyyy HH:mm:ss z")); - - response.setContentLength((int)engineResponse.getContentLength()); - S3RestServlet.writeResponse(response, dataHandler.getInputStream()); - } - } - - private void executePutObject(HttpServletRequest request, HttpServletResponse response) throws IOException - { - String continueHeader = request.getHeader( "Expect" ); - if (continueHeader != null && continueHeader.equalsIgnoreCase("100-continue")) { - S3RestServlet.writeResponse(response, "HTTP/1.1 100 Continue\r\n"); - } - -<<<<<<< HEAD - String contentType = request.getHeader( "Content-Type" ); -======= ->>>>>>> 6472e7b... Now really adding the renamed files! 
- long contentLength = Converter.toLong(request.getHeader("Content-Length"), 0); - - String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY); - S3PutObjectInlineRequest engineRequest = new S3PutObjectInlineRequest(); - engineRequest.setBucketName(bucket); - engineRequest.setKey(key); - engineRequest.setContentLength(contentLength); - engineRequest.setMetaEntries( extractMetaData( request )); - engineRequest.setCannedAccess( request.getHeader( "x-amz-acl" )); - - DataHandler dataHandler = new DataHandler(new ServletRequestDataSource(request)); - engineRequest.setData(dataHandler); - - S3PutObjectInlineResponse engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest); - response.setHeader("ETag", "\"" + engineResponse.getETag() + "\""); - String version = engineResponse.getVersion(); - if (null != version) response.addHeader( "x-amz-version-id", version ); - } - - /** - * Once versioining is turned on then to delete an object requires specifying a version - * parameter. A deletion marker is set once versioning is turned on in a bucket. - */ - private void executeDeleteObject(HttpServletRequest request, HttpServletResponse response) throws IOException - { - String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY); - - S3DeleteObjectRequest engineRequest = new S3DeleteObjectRequest(); - engineRequest.setBucketName(bucket); - engineRequest.setKey(key); - - // -> is this a request for a specific version of the object? 
look for "versionId=" in the query string - String queryString = request.getQueryString(); - if (null != queryString) engineRequest.setVersion( returnParameter( queryString, "versionId=" )); - - S3Response engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest( engineRequest ); - - response.setStatus( engineResponse.getResultCode()); - String version = engineRequest.getVersion(); - if (null != version) response.addHeader( "x-amz-version-id", version ); - } -<<<<<<< HEAD -======= - - /* - * The purpose of a plain POST operation is to add an object to a specified bucket using HTML forms. - * The capability is for developer and tester convenience providing a simple browser-based upload - * feature as an alternative to using PUTs. - * In the case of PUTs the upload information is passed through HTTP headers. However in the case of a - * POST this information must be supplied as form fields. Many of these are mandatory or otherwise - * the POST request will be rejected. - * The requester using the HTML page must submit valid credentials sufficient for checking that - * the bucket to which the object is to be added has WRITE permission for that user. The AWS access - * key field on the form is taken to be synonymous with the user canonical ID for this purpose. 
- */ - private void executePlainPostObject(HttpServletRequest request, HttpServletResponse response) throws IOException - { - String continueHeader = request.getHeader( "Expect" ); - if (continueHeader != null && continueHeader.equalsIgnoreCase("100-continue")) { - S3RestServlet.writeResponse(response, "HTTP/1.1 100 Continue\r\n"); - } - - long contentLength = Converter.toLong(request.getHeader("Content-Length"), 0); - - String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY); - String accessKey = (String) request.getAttribute(S3Constants.PLAIN_POST_ACCESS_KEY); - String signature = (String) request.getAttribute(S3Constants.PLAIN_POST_SIGNATURE); - S3Grant grant = new S3Grant(); - grant.setCanonicalUserID(accessKey); - grant.setGrantee(SAcl.GRANTEE_USER); - grant.setPermission(SAcl.PERMISSION_FULL); - S3AccessControlList acl = new S3AccessControlList(); - acl.addGrant(grant); - S3PutObjectInlineRequest engineRequest = new S3PutObjectInlineRequest(); - engineRequest.setBucketName(bucket); - engineRequest.setKey(key); - engineRequest.setAcl(acl); - engineRequest.setContentLength(contentLength); - engineRequest.setMetaEntries( extractMetaData( request )); - engineRequest.setCannedAccess( request.getHeader( "x-amz-acl" )); - - DataHandler dataHandler = new DataHandler(new ServletRequestDataSource(request)); - engineRequest.setData(dataHandler); - - S3PutObjectInlineResponse engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest); - response.setHeader("ETag", "\"" + engineResponse.getETag() + "\""); - String version = engineResponse.getVersion(); - if (null != version) response.addHeader( "x-amz-version-id", version ); - } - ->>>>>>> 6472e7b... Now really adding the renamed files! 
- - private void executeHeadObject(HttpServletRequest request, HttpServletResponse response) throws IOException - { - String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY); - - S3GetObjectRequest engineRequest = new S3GetObjectRequest(); - engineRequest.setBucketName(bucket); - engineRequest.setKey(key); - engineRequest.setInlineData(true); // -> need to set so we get ETag etc returned - engineRequest.setReturnData(true); - engineRequest.setReturnMetadata(true); - engineRequest = setRequestByteRange( request, engineRequest ); - - // -> is this a request for a specific version of the object? look for "versionId=" in the query string - String queryString = request.getQueryString(); - if (null != queryString) engineRequest.setVersion( returnParameter( queryString, "versionId=" )); - - S3GetObjectResponse engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest( engineRequest ); - response.setStatus( engineResponse.getResultCode()); - - String deleteMarker = engineResponse.getDeleteMarker(); - if ( null != deleteMarker ) { - response.addHeader( "x-amz-delete-marker", "true" ); - response.addHeader( "x-amz-version-id", deleteMarker ); - } - else { - String version = engineResponse.getVersion(); - if (null != version) response.addHeader( "x-amz-version-id", version ); - } - - // -> was the head request conditional? 
- if (!conditionPassed( request, response, engineResponse.getLastModified().getTime(), engineResponse.getETag())) - return; - - - // -> for a head request we return everything except the data - returnMetaData( engineResponse, response ); - - DataHandler dataHandler = engineResponse.getData(); - if (dataHandler != null) { - response.addHeader("ETag", "\"" + engineResponse.getETag() + "\""); - response.addHeader("Last-Modified", DateHelper.getDateDisplayString( - DateHelper.GMT_TIMEZONE, engineResponse.getLastModified().getTime(), "E, d MMM yyyy HH:mm:ss z")); - - response.setContentLength((int)engineResponse.getContentLength()); - } - } - - // There is a problem with POST since the 'Signature' and 'AccessKey' parameters are not - // determined until we hit this function (i.e., they are encoded in the body of the message - // they are not HTTP request headers). All the values we used to get in the request headers - // are not encoded in the request body. - // -<<<<<<< HEAD - public void executePostObject( HttpServletRequest request, HttpServletResponse response ) throws IOException -======= - // add ETag header computed as Base64 MD5 whenever object is uploaded or updated - // - private void executePostObject( HttpServletRequest request, HttpServletResponse response ) throws IOException ->>>>>>> 6472e7b... Now really adding the renamed files! 
- { - String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - String contentType = request.getHeader( "Content-Type" ); - int boundaryIndex = contentType.indexOf( "boundary=" ); - String boundary = "--" + (contentType.substring( boundaryIndex + 9 )); - String lastBoundary = boundary + "--"; - - InputStreamReader isr = new InputStreamReader( request.getInputStream()); - BufferedReader br = new BufferedReader( isr ); - - StringBuffer temp = new StringBuffer(); - String oneLine = null; - String name = null; - String value = null; - String metaName = null; // -> after stripped off the x-amz-meta- - boolean isMetaTag = false; - int countMeta = 0; - int state = 0; - - // [A] First parse all the parts out of the POST request and message body - // -> bucket name is still encoded in a Host header - S3AuthParams params = new S3AuthParams(); - List metaSet = new ArrayList(); - S3PutObjectInlineRequest engineRequest = new S3PutObjectInlineRequest(); - engineRequest.setBucketName( bucket ); - - // -> the last body part contains the content that is used to write the S3 object, all - // other body parts are header values - while( null != (oneLine = br.readLine())) - { - if ( oneLine.startsWith( lastBoundary )) - { - // -> this is the data of the object to put - if (0 < temp.length()) - { - value = temp.toString(); - temp.setLength( 0 ); - - engineRequest.setContentLength( value.length()); - engineRequest.setDataAsString( value ); - } - break; - } - else if ( oneLine.startsWith( boundary )) - { - // -> this is the header data - if (0 < temp.length()) - { - value = temp.toString().trim(); - temp.setLength( 0 ); - //System.out.println( "param: " + name + " = " + value ); - - if (name.equalsIgnoreCase( "key" )) { - engineRequest.setKey( value ); - } - else if (name.equalsIgnoreCase( "x-amz-acl" )) { - engineRequest.setCannedAccess( value ); - } - else if (isMetaTag) { - S3MetaDataEntry oneMeta = new S3MetaDataEntry(); - oneMeta.setName( metaName ); - 
oneMeta.setValue( value ); - metaSet.add( oneMeta ); - countMeta++; - metaName = null; - } - - // -> build up the headers so we can do authentication on this POST - HeaderParam oneHeader = new HeaderParam(); - oneHeader.setName( name ); - oneHeader.setValue( value ); - params.addHeader( oneHeader ); - } - state = 1; - } - else if (1 == state && 0 == oneLine.length()) - { - // -> data of a body part starts here - state = 2; - } - else if (1 == state) - { - // -> the name of the 'name-value' pair is encoded in the Content-Disposition header - if (oneLine.startsWith( "Content-Disposition: form-data;")) - { - isMetaTag = false; - int nameOffset = oneLine.indexOf( "name=" ); - if (-1 != nameOffset) - { - name = oneLine.substring( nameOffset+5 ); - if (name.startsWith( "\"" )) name = name.substring( 1 ); - if (name.endsWith( "\"" )) name = name.substring( 0, name.length()-1 ); - name = name.trim(); - - if (name.startsWith( "x-amz-meta-" )) { - metaName = name.substring( 11 ); - isMetaTag = true; - } - } - } - } - else if (2 == state) - { - // -> the body parts data may take up multiple lines - //System.out.println( oneLine.length() + " body data: " + oneLine ); - temp.append( oneLine ); - } -// else System.out.println( oneLine.length() + " preamble: " + oneLine ); - } - - - // [B] Authenticate the POST request after we have all the headers - try { - S3RestServlet.authenticateRequest( request, params ); - } - catch( Exception e ) { - throw new IOException( e.toString()); - } - - // [C] Perform the request - if (0 < countMeta) engineRequest.setMetaEntries( metaSet.toArray(new S3MetaDataEntry[0])); - S3PutObjectInlineResponse engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest( engineRequest ); - response.setHeader("ETag", "\"" + engineResponse.getETag() + "\""); - String version = engineResponse.getVersion(); - if (null != version) response.addHeader( "x-amz-version-id", version ); - } - - /** - * Save all the information about the multipart upload 
request in the database so once it is finished - * (in the future) we can create the real S3 object. - * - * @throws IOException - */ - private void executeInitiateMultipartUpload( HttpServletRequest request, HttpServletResponse response ) throws IOException - { -<<<<<<< HEAD - // -> this request is via a POST which typically has its auth parameters inside the message -======= - // This request is via a POST which typically has its auth parameters inside the message ->>>>>>> 6472e7b... Now really adding the renamed files! - try { - S3RestServlet.authenticateRequest( request, S3RestServlet.extractRequestHeaders( request )); - } - catch( Exception e ) { - throw new IOException( e.toString()); - } - - String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY); - String cannedAccess = request.getHeader( "x-amz-acl" ); - S3MetaDataEntry[] meta = extractMetaData( request ); - - // -> the S3 engine has easy access to all the privileged checking code - S3PutObjectInlineRequest engineRequest = new S3PutObjectInlineRequest(); - engineRequest.setBucketName(bucket); - engineRequest.setKey(key); - engineRequest.setCannedAccess( cannedAccess ); - engineRequest.setMetaEntries( meta ); - S3PutObjectInlineResponse engineResponse = ServiceProvider.getInstance().getS3Engine().initiateMultipartUpload( engineRequest ); - int result = engineResponse.getResultCode(); - response.setStatus( result ); - if (200 != result) return; - - // -> there is no SOAP version of this function - StringBuffer xml = new StringBuffer(); - xml.append( "" ); - xml.append( "" ); - xml.append( "" ).append( bucket ).append( "" ); - xml.append( "" ).append( key ).append( "" ); - xml.append( "" ).append( engineResponse.getUploadId()).append( "" ); - xml.append( "" ); - - response.setContentType("text/xml; charset=UTF-8"); - S3RestServlet.endResponse(response, xml.toString()); - } - - private void executeUploadPart( 
HttpServletRequest request, HttpServletResponse response ) throws IOException - { - String continueHeader = request.getHeader( "Expect" ); - if (continueHeader != null && continueHeader.equalsIgnoreCase("100-continue")) { - S3RestServlet.writeResponse(response, "HTTP/1.1 100 Continue\r\n"); - } - - String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY); - int partNumber = -1; - int uploadId = -1; - - long contentLength = Converter.toLong(request.getHeader("Content-Length"), 0); - -<<<<<<< HEAD - String md5 = request.getHeader( "Content-MD5" ); - -======= ->>>>>>> 6472e7b... Now really adding the renamed files! - String temp = request.getParameter("uploadId"); - if (null != temp) uploadId = Integer.parseInt( temp ); - - temp = request.getParameter("partNumber"); - if (null != temp) partNumber = Integer.parseInt( temp ); - if (partNumber < 1 || partNumber > 10000) { - logger.error("uploadPart invalid part number " + partNumber ); - response.setStatus(416); - return; - } - - // -> verification - try { - MultipartLoadDao uploadDao = new MultipartLoadDao(); - if (null == uploadDao.multipartExits( uploadId )) { - response.setStatus(404); - return; - } - - // -> another requirement is that only the upload initiator can upload parts - String initiator = uploadDao.getInitiator( uploadId ); - if (null == initiator || !initiator.equals( UserContext.current().getAccessKey())) { - response.setStatus(403); - return; - } - } - catch( Exception e ) { - logger.error("executeUploadPart failed due to " + e.getMessage(), e); - response.setStatus(500); - return; - } - - S3PutObjectInlineRequest engineRequest = new S3PutObjectInlineRequest(); - engineRequest.setBucketName(bucket); - engineRequest.setKey(key); - engineRequest.setContentLength(contentLength); - DataHandler dataHandler = new DataHandler(new ServletRequestDataSource(request)); - engineRequest.setData(dataHandler); - - 
S3PutObjectInlineResponse engineResponse = ServiceProvider.getInstance().getS3Engine().saveUploadPart( engineRequest, uploadId, partNumber ); - if (null != engineResponse.getETag()) response.setHeader("ETag", "\"" + engineResponse.getETag() + "\""); - response.setStatus(engineResponse.getResultCode()); - } - - /** - * This function is required to both parsing XML on the request and return XML as part of its result. - * - * @param request - * @param response - * @throws IOException - */ - private void executeCompleteMultipartUpload( HttpServletRequest request, HttpServletResponse response ) throws IOException - { - // [A] This request is via a POST which typically has its auth parameters inside the message - try { - S3RestServlet.authenticateRequest( request, S3RestServlet.extractRequestHeaders( request )); - } - catch( Exception e ) { - throw new IOException( e.toString()); - } - - String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY); - S3MultipartPart[] parts = null; - S3MetaDataEntry[] meta = null; - String cannedAccess = null; - int uploadId = -1; - -<<<<<<< HEAD - // -> Amazon defines to keep connection alive by sending whitespace characters until done - OutputStream os = response.getOutputStream(); -======= - // AWS S3 specifies that the keep alive connection is by sending whitespace characters until done - // Therefore the XML version prolog is prepended to the stream in advance - OutputStream outputStream = response.getOutputStream(); - outputStream.write("".getBytes()); ->>>>>>> 6472e7b... Now really adding the renamed files! 
- - String temp = request.getParameter("uploadId"); - if (null != temp) uploadId = Integer.parseInt( temp ); - - - // [B] Look up all the uploaded body parts and related info - try { - MultipartLoadDao uploadDao = new MultipartLoadDao(); - if (null == uploadDao.multipartExits( uploadId )) { - response.setStatus(404); -<<<<<<< HEAD - returnErrorXML( 404, "NotFound", os ); -======= - returnErrorXML( 404, "NotFound", outputStream ); ->>>>>>> 6472e7b... Now really adding the renamed files! - return; - } - - // -> another requirement is that only the upload initiator can upload parts - String initiator = uploadDao.getInitiator( uploadId ); - if (null == initiator || !initiator.equals( UserContext.current().getAccessKey())) { - response.setStatus(403); -<<<<<<< HEAD - returnErrorXML( 403, "Forbidden", os ); -======= - returnErrorXML( 403, "Forbidden", outputStream ); ->>>>>>> 6472e7b... Now really adding the renamed files! - return; - } - - parts = uploadDao.getParts( uploadId, 10000, 0 ); - meta = uploadDao.getMeta( uploadId ); - cannedAccess = uploadDao.getCannedAccess( uploadId ); - } - catch( Exception e ) { - logger.error("executeCompleteMultipartUpload failed due to " + e.getMessage(), e); - response.setStatus(500); -<<<<<<< HEAD - returnErrorXML( 500, "InternalError", os ); -======= - returnErrorXML( 500, "InternalError", outputStream ); ->>>>>>> 6472e7b... Now really adding the renamed files! 
- return; - } - - - // [C] Parse the given XML body part and perform error checking -<<<<<<< HEAD - Tuple match = verifyParts( request.getInputStream(), parts ); - if (200 != match.getFirst().intValue()) { - response.setStatus(match.getFirst().intValue()); - returnErrorXML( match.getFirst().intValue(), match.getSecond(), os ); -======= - OrderedPair match = verifyParts( request.getInputStream(), parts ); - if (200 != match.getFirst().intValue()) { - response.setStatus(match.getFirst().intValue()); - returnErrorXML( match.getFirst().intValue(), match.getSecond(), outputStream ); ->>>>>>> 6472e7b... Now really adding the renamed files! - return; - } - - - // [D] Ask the engine to create a newly re-constituted object - S3PutObjectInlineRequest engineRequest = new S3PutObjectInlineRequest(); - engineRequest.setBucketName(bucket); - engineRequest.setKey(key); - engineRequest.setMetaEntries(meta); - engineRequest.setCannedAccess(cannedAccess); - -<<<<<<< HEAD - S3PutObjectInlineResponse engineResponse = ServiceProvider.getInstance().getS3Engine().concatentateMultipartUploads( response, engineRequest, parts, os ); -======= - S3PutObjectInlineResponse engineResponse = ServiceProvider.getInstance().getS3Engine().concatentateMultipartUploads( response, engineRequest, parts, outputStream ); ->>>>>>> 6472e7b... Now really adding the renamed files! 
- int result = engineResponse.getResultCode(); - // -> free all multipart state since we now have one concatentated object - if (200 == result) ServiceProvider.getInstance().getS3Engine().freeUploadParts( bucket, uploadId, false ); - -<<<<<<< HEAD - // -> if all successful then clean up all left over parts - if ( 200 == result ) - { - StringBuffer xml = new StringBuffer(); - xml.append( "" ); -======= - // If all successful then clean up all left over parts - // Notice that "" has already been written into the servlet output stream at the beginning of section [A] - if ( 200 == result ) - { - StringBuffer xml = new StringBuffer(); ->>>>>>> 6472e7b... Now really adding the renamed files! - xml.append( "" ); - xml.append( "" ).append( "http://" + bucket + ".s3.amazonaws.com/" + key ).append( "" ); - xml.append( "" ).append( bucket ).append( "" ); - xml.append( "" ).append( key ).append( "" ); -<<<<<<< HEAD - xml.append( "\"" ).append( engineResponse.getETag()).append( "\"" ); - xml.append( "" ); - os.write( xml.toString().getBytes()); - os.close(); - } - else returnErrorXML( result, null, os ); -======= - xml.append( "\"" ).append( engineResponse.getETag()).append( "\"" ); - xml.append( "" ); - String xmlString = xml.toString().replaceAll("^\\s+", ""); // Remove leading whitespace characters - outputStream.write( xmlString.getBytes()); - outputStream.close(); - } - else returnErrorXML( result, null, outputStream ); ->>>>>>> 6472e7b... Now really adding the renamed files! 
- } - - private void executeAbortMultipartUpload( HttpServletRequest request, HttpServletResponse response ) throws IOException - { - String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - int uploadId = -1; - - String temp = request.getParameter("uploadId"); - if (null != temp) uploadId = Integer.parseInt( temp ); - - int result = ServiceProvider.getInstance().getS3Engine().freeUploadParts( bucket, uploadId, true ); - response.setStatus( result ); - } - - private void executeListUploadParts( HttpServletRequest request, HttpServletResponse response ) throws IOException - { - String bucketName = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY); - String owner = null; - String initiator = null; - S3MultipartPart[] parts = null; - int remaining = 0; - int uploadId = -1; - int maxParts = 1000; - int partMarker = 0; - int nextMarker = 0; - - String temp = request.getParameter("uploadId"); - if (null != temp) uploadId = Integer.parseInt( temp ); - - temp = request.getParameter("max-parts"); - if (null != temp) { - maxParts = Integer.parseInt( temp ); - if (maxParts > 1000 || maxParts < 0) maxParts = 1000; - } - - temp = request.getParameter("part-number-marker"); - if (null != temp) partMarker = Integer.parseInt( temp ); - - - // -> does the bucket exist, we may need it to verify access permissions - SBucketDao bucketDao = new SBucketDao(); - SBucket bucket = bucketDao.getByName(bucketName); - if (bucket == null) { - logger.error( "listUploadParts failed since " + bucketName + " does not exist" ); - response.setStatus(404); - return; - } - - try { - MultipartLoadDao uploadDao = new MultipartLoadDao(); -<<<<<<< HEAD - Tuple exists = uploadDao.multipartExits( uploadId ); -======= - OrderedPair exists = uploadDao.multipartExits( uploadId ); ->>>>>>> 6472e7b... Now really adding the renamed files! 
- if (null == exists) { - response.setStatus(404); - return; - } - owner = exists.getFirst(); - - // -> the multipart initiator or bucket owner can do this action - initiator = uploadDao.getInitiator( uploadId ); - if (null == initiator || !initiator.equals( UserContext.current().getAccessKey())) - { - try { - // -> write permission on a bucket allows a PutObject / DeleteObject action on any object in the bucket - S3PolicyContext context = new S3PolicyContext( PolicyActions.ListMultipartUploadParts, bucketName ); - context.setKeyName( exists.getSecond()); - S3Engine.verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE ); - } - catch (PermissionDeniedException e) { - response.setStatus(403); - return; - } - } - - parts = uploadDao.getParts( uploadId, maxParts, partMarker ); - remaining = uploadDao.numParts( uploadId, partMarker+maxParts ); - } - catch( Exception e ) { - logger.error("List Uploads failed due to " + e.getMessage(), e); - response.setStatus(500); - } - - - StringBuffer xml = new StringBuffer(); - xml.append( "" ); - xml.append( "" ); - xml.append( "" ).append( bucket ).append( "" ); - xml.append( "" ).append( key ).append( "" ); - xml.append( "" ).append( uploadId ).append( "" ); - - // -> currently we just have the access key and have no notion of a display name - xml.append( "" ); - xml.append( "" ).append( initiator ).append( "" ); - xml.append( "" ); - xml.append( "" ); - xml.append( "" ); - xml.append( "" ).append( owner ).append( "" ); - xml.append( "" ); - xml.append( "" ); - - StringBuffer partsList = new StringBuffer(); - for( int i=0; i < parts.length; i++ ) - { - S3MultipartPart onePart = parts[i]; - if (null == onePart) break; - - nextMarker = onePart.getPartNumber(); - partsList.append( "" ); - partsList.append( "" ).append( nextMarker ).append( "" ); - partsList.append( "" ).append( DatatypeConverter.printDateTime( onePart.getLastModified())).append( "" ); - partsList.append( "\"" ).append( onePart.getETag()).append( 
"\"" ); - partsList.append( "" ).append( onePart.getSize()).append( "" ); - partsList.append( "" ); - } - - xml.append( "STANDARD" ); - xml.append( "" ).append( partMarker ).append( "" ); - xml.append( "" ).append( nextMarker ).append( "" ); - xml.append( "" ).append( maxParts ).append( "" ); - xml.append( "" ).append((0 < remaining ? "true" : "false" )).append( "" ); - - xml.append( partsList.toString()); - xml.append( "" ); - - response.setStatus(200); - response.setContentType("text/xml; charset=UTF-8"); - S3RestServlet.endResponse(response, xml.toString()); - } - - /** - * Support the "Range: bytes=0-399" header with just one byte range. - * @param request - * @param engineRequest - * @return - */ - private S3GetObjectRequest setRequestByteRange( HttpServletRequest request, S3GetObjectRequest engineRequest ) - { - String temp = request.getHeader( "Range" ); - if (null == temp) return engineRequest; - - int offset = temp.indexOf( "=" ); - if (-1 != offset) - { - String range = temp.substring( offset+1 ); - - String[] parts = range.split( "-" ); - if (2 >= parts.length) { - // -> the end byte is inclusive - engineRequest.setByteRangeStart( Long.parseLong(parts[0])); - engineRequest.setByteRangeEnd( Long.parseLong(parts[1])+1); - } - } - return engineRequest; - } - - private S3ConditionalHeaders conditionalRequest( HttpServletRequest request, boolean isCopy ) - { - S3ConditionalHeaders headers = new S3ConditionalHeaders(); - - if (isCopy) { - headers.setModifiedSince( request.getHeader( "x-amz-copy-source-if-modified-since" )); - headers.setUnModifiedSince( request.getHeader( "x-amz-copy-source-if-unmodified-since" )); - headers.setMatch( request.getHeader( "x-amz-copy-source-if-match" )); - headers.setNoneMatch( request.getHeader( "x-amz-copy-source-if-none-match" )); - } - else { - headers.setModifiedSince( request.getHeader( "If-Modified-Since" )); - headers.setUnModifiedSince( request.getHeader( "If-Unmodified-Since" )); - headers.setMatch( request.getHeader( 
"If-Match" )); - headers.setNoneMatch( request.getHeader( "If-None-Match" )); - } - return headers; - } - - private boolean conditionPassed( HttpServletRequest request, HttpServletResponse response, Date lastModified, String ETag ) - { - S3ConditionalHeaders ifCond = conditionalRequest( request, false ); - - if (0 > ifCond.ifModifiedSince( lastModified )) { - response.setStatus( 304 ); - return false; - } - if (0 > ifCond.ifUnmodifiedSince( lastModified )) { - response.setStatus( 412 ); - return false; - } - if (0 > ifCond.ifMatchEtag( ETag )) { - response.setStatus( 412 ); - return false; - } - if (0 > ifCond.ifNoneMatchEtag( ETag )) { - response.setStatus( 412 ); - return false; - } - return true; - } - - /** - * Return the saved object's meta data back to the client as HTTP "x-amz-meta-" headers. - * This function is constructing an HTTP header and these headers have a defined syntax - * as defined in rfc2616. Any characters that could cause an invalid HTTP header will - * prevent that meta data from being returned via the REST call (as is defined in the Amazon - * spec). These characters can be defined if using the SOAP API as well as the REST API. 
- * - * @param engineResponse - * @param response - */ - private void returnMetaData( S3GetObjectResponse engineResponse, HttpServletResponse response ) - { - boolean ignoreMeta = false; - int ignoredCount = 0; - - S3MetaDataEntry[] metaSet = engineResponse.getMetaEntries(); - for( int i=0; null != metaSet && i < metaSet.length; i++ ) - { - String name = metaSet[i].getName(); - String value = metaSet[i].getValue(); - byte[] nameBytes = name.getBytes(); - ignoreMeta = false; - - // -> cannot have control characters (octets 0 - 31) and DEL (127), in an HTTP header - for( int j=0; j < name.length(); j++ ) { - if ((0 <= nameBytes[j] && 31 >= nameBytes[j]) || 127 == nameBytes[j]) { - ignoreMeta = true; - break; - } - } - - // -> cannot have HTTP separators in an HTTP header - if (-1 != name.indexOf('(') || -1 != name.indexOf(')') || -1 != name.indexOf('@') || - -1 != name.indexOf('<') || -1 != name.indexOf('>') || -1 != name.indexOf('\"') || - -1 != name.indexOf('[') || -1 != name.indexOf(']') || -1 != name.indexOf('=') || - -1 != name.indexOf(',') || -1 != name.indexOf(';') || -1 != name.indexOf(':') || - -1 != name.indexOf('\\') || -1 != name.indexOf('/') || -1 != name.indexOf(' ') || - -1 != name.indexOf('{') || -1 != name.indexOf('}') || -1 != name.indexOf('?') || - -1 != name.indexOf('\t') - ) ignoreMeta = true; - - - if ( ignoreMeta ) - ignoredCount++; - else response.addHeader( "x-amz-meta-" + name, value ); - } - - if (0 < ignoredCount) response.addHeader( "x-amz-missing-meta", new String( "" + ignoredCount )); - } - - /** - * Extract the name and value of all meta data so it can be written with the - * object that is being 'PUT'. 
- * - * @param request - * @return - */ - private S3MetaDataEntry[] extractMetaData( HttpServletRequest request ) - { - List metaSet = new ArrayList(); - int count = 0; - - Enumeration headers = request.getHeaderNames(); - while( headers.hasMoreElements()) - { - String key = (String)headers.nextElement(); - if (key.startsWith( "x-amz-meta-" )) - { - String name = key.substring( 11 ); - String value = request.getHeader( key ); - if (null != value) { - S3MetaDataEntry oneMeta = new S3MetaDataEntry(); - oneMeta.setName( name ); - oneMeta.setValue( value ); - metaSet.add( oneMeta ); - count++; - } - } - } - - if ( 0 < count ) - return metaSet.toArray(new S3MetaDataEntry[0]); - else return null; - } - - /** - * Parameters on the query string may or may not be name-value pairs. - * For example: "?acl&versionId=2", notice that "acl" has no value other - * than it is present. - * - * @param queryString - from a URL to locate the 'find' parameter - * @param find - name string to return first found - * @return the value matching the found name - */ - private String returnParameter( String queryString, String find ) - { - int offset = queryString.indexOf( find ); - if (-1 != offset) - { - String temp = queryString.substring( offset ); - String[] paramList = temp.split( "[&=]" ); - if (null != paramList && 2 <= paramList.length) return paramList[1]; - } - return null; - } - - private void returnErrorXML( int errorCode, String errorDescription, OutputStream os ) throws IOException - { - StringBuffer xml = new StringBuffer(); - - xml.append( "" ); - xml.append( "" ); - - if ( null != errorDescription ) - xml.append( "" ).append( errorDescription ).append( "" ); - else xml.append( "" ).append( errorCode ).append( "" ); - - xml.append( "" ).append( "" ).append( "" ); - xml.append( "" ).append( "" ).append( "" ); - xml.append( "" ).append( "" ).append( "" ); - xml.append( "" ); - - os.write( xml.toString().getBytes()); - os.close(); - } - - /** - * The Complete Multipart Upload 
function pass in the request body a list of - * all uploaded body parts. It is required that we verify that list matches - * what was uploaded. - * - * @param is - * @param parts - * @return error code, and error string - * @throws ParserConfigurationException, IOException, SAXException - */ -<<<<<<< HEAD - private Tuple verifyParts( InputStream is, S3MultipartPart[] parts ) -======= - private OrderedPair verifyParts( InputStream is, S3MultipartPart[] parts ) ->>>>>>> 6472e7b... Now really adding the renamed files! - { - try { - DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance(); - dbf.setNamespaceAware( true ); - - DocumentBuilder db = dbf.newDocumentBuilder(); - Document doc = db.parse( is ); - Node parent = null; - Node contents = null; - NodeList children = null; - String temp = null; - String element = null; - String eTag = null; - int lastNumber = -1; - int partNumber = -1; - int count = 0; - - // -> handle with and without a namespace - NodeList nodeSet = doc.getElementsByTagNameNS( "http://s3.amazonaws.com/doc/2006-03-01/", "Part" ); - count = nodeSet.getLength(); - if (0 == count) { - nodeSet = doc.getElementsByTagName( "Part" ); - count = nodeSet.getLength(); - } -<<<<<<< HEAD - if (count != parts.length) return new Tuple(400, "InvalidPart"); -======= - if (count != parts.length) return new OrderedPair(400, "InvalidPart"); ->>>>>>> 6472e7b... Now really adding the renamed files! 
- - // -> get a list of all the children elements of the 'Part' parent element - for( int i=0; i < count; i++ ) - { - partNumber = -1; - eTag = null; - parent = nodeSet.item(i); - - if (null != (children = parent.getChildNodes())) - { - int numChildren = children.getLength(); - for( int j=0; j < numChildren; j++ ) - { - contents = children.item( j ); - element = contents.getNodeName().trim(); - if ( element.endsWith( "PartNumber" )) - { - temp = contents.getFirstChild().getNodeValue(); - if (null != temp) partNumber = Integer.parseInt( temp ); - //System.out.println( "part: " + partNumber ); - } - else if (element.endsWith( "ETag" )) - { - eTag = contents.getFirstChild().getNodeValue(); - //System.out.println( "etag: " + eTag ); - } - } - } - - // -> do the parts given in the call XML match what was previously uploaded? - if (lastNumber >= partNumber) { -<<<<<<< HEAD - return new Tuple(400, "InvalidPartOrder"); -======= - return new OrderedPair(400, "InvalidPartOrder"); ->>>>>>> 6472e7b... Now really adding the renamed files! - } - if (partNumber != parts[i].getPartNumber() || - eTag == null || - !eTag.equalsIgnoreCase( "\"" + parts[i].getETag() + "\"" )) { -<<<<<<< HEAD - return new Tuple(400, "InvalidPart"); -======= - return new OrderedPair(400, "InvalidPart"); ->>>>>>> 6472e7b... Now really adding the renamed files! - } - - lastNumber = partNumber; - } -<<<<<<< HEAD - return new Tuple(200, "Success"); - } - catch( Exception e ) { - return new Tuple(500, e.toString()); -======= - return new OrderedPair(200, "Success"); - } - catch( Exception e ) { - return new OrderedPair(500, e.toString()); ->>>>>>> 6472e7b... Now really adding the renamed files! - } - } -} +/* + * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.cloud.bridge.service.controller.s3; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.Date; +import java.util.Enumeration; +import java.util.List; +import java.util.UUID; + +import javax.activation.DataHandler; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import javax.xml.bind.DatatypeConverter; +import javax.xml.parsers.DocumentBuilder; +import javax.xml.parsers.DocumentBuilderFactory; +import javax.xml.parsers.ParserConfigurationException; +import javax.xml.stream.XMLStreamException; + +import org.apache.log4j.Logger; +import org.w3c.dom.Document; +import org.w3c.dom.Node; +import org.w3c.dom.NodeList; + +import com.amazon.s3.CopyObjectResponse; +import com.amazon.s3.GetObjectAccessControlPolicyResponse; +import com.cloud.bridge.io.MTOMAwareResultStreamWriter; +import com.cloud.bridge.model.SAcl; +import com.cloud.bridge.model.SBucket; +import com.cloud.bridge.persist.dao.MultipartLoadDao; +import com.cloud.bridge.persist.dao.SBucketDao; +import com.cloud.bridge.service.S3Constants; +import com.cloud.bridge.service.S3RestServlet; +import com.cloud.bridge.service.UserContext; +import com.cloud.bridge.service.core.s3.S3AccessControlList; +import com.cloud.bridge.service.core.s3.S3AccessControlPolicy; +import com.cloud.bridge.service.core.s3.S3AuthParams; +import com.cloud.bridge.service.core.s3.S3ConditionalHeaders; +import 
com.cloud.bridge.service.core.s3.S3CopyObjectRequest; +import com.cloud.bridge.service.core.s3.S3CopyObjectResponse; +import com.cloud.bridge.service.core.s3.S3DeleteObjectRequest; +import com.cloud.bridge.service.core.s3.S3Engine; +import com.cloud.bridge.service.core.s3.S3GetObjectAccessControlPolicyRequest; +import com.cloud.bridge.service.core.s3.S3GetObjectRequest; +import com.cloud.bridge.service.core.s3.S3GetObjectResponse; +import com.cloud.bridge.service.core.s3.S3Grant; +import com.cloud.bridge.service.core.s3.S3MetaDataEntry; +import com.cloud.bridge.service.core.s3.S3MultipartPart; +import com.cloud.bridge.service.core.s3.S3PolicyContext; +import com.cloud.bridge.service.core.s3.S3PutObjectInlineRequest; +import com.cloud.bridge.service.core.s3.S3PutObjectInlineResponse; +import com.cloud.bridge.service.core.s3.S3PutObjectRequest; +import com.cloud.bridge.service.core.s3.S3Response; +import com.cloud.bridge.service.core.s3.S3SetBucketAccessControlPolicyRequest; +import com.cloud.bridge.service.core.s3.S3SetObjectAccessControlPolicyRequest; +import com.cloud.bridge.service.core.s3.S3PolicyAction.PolicyActions; +import com.cloud.bridge.service.exception.PermissionDeniedException; +import com.cloud.bridge.util.Converter; +import com.cloud.bridge.util.DateHelper; +import com.cloud.bridge.util.HeaderParam; +import com.cloud.bridge.util.ServletRequestDataSource; +import com.cloud.bridge.util.OrderedPair; + +/** + * @author Kelven Yang, John Zucker + */ +public class S3ObjectAction implements ServletAction { + protected final static Logger logger = Logger.getLogger(S3ObjectAction.class); + + private DocumentBuilderFactory dbf = null; + + public S3ObjectAction() { + dbf = DocumentBuilderFactory.newInstance(); + dbf.setNamespaceAware( true ); + + } + + public void execute(HttpServletRequest request, HttpServletResponse response) + throws IOException, XMLStreamException + { + String method = request.getMethod(); + String queryString = request.getQueryString(); + 
String copy = null; + + response.addHeader( "x-amz-request-id", UUID.randomUUID().toString()); + + if ( method.equalsIgnoreCase( "GET" )) + { + if ( queryString != null && queryString.length() > 0 ) + { + if (queryString.contains("acl")) executeGetObjectAcl(request, response); + else if (queryString.contains("uploadId")) executeListUploadParts(request, response); + else executeGetObject(request, response); + } + else executeGetObject(request, response); + } + else if (method.equalsIgnoreCase( "PUT" )) + { + if ( queryString != null && queryString.length() > 0 ) + { + if (queryString.contains("acl")) executePutObjectAcl(request, response); + else if (queryString.contains("partNumber")) executeUploadPart(request, response); + else executePutObject(request, response); + } + else if ( null != (copy = request.getHeader( "x-amz-copy-source" ))) + { + executeCopyObject(request, response, copy.trim()); + } + else executePutObject(request, response); + } + else if (method.equalsIgnoreCase( "DELETE" )) + { + if ( queryString != null && queryString.length() > 0 ) + { + if (queryString.contains("uploadId")) executeAbortMultipartUpload(request, response); + else executeDeleteObject(request, response); + } + else executeDeleteObject(request, response); + } + else if (method.equalsIgnoreCase( "HEAD" )) + { + executeHeadObject(request, response); + } + else if (method.equalsIgnoreCase( "POST" )) + { + if ( queryString != null && queryString.length() > 0 ) + { + if (queryString.contains("uploads")) executeInitiateMultipartUpload(request, response); + else if (queryString.contains("uploadId")) executeCompleteMultipartUpload(request, response); + } + else if ( request.getAttribute(S3Constants.PLAIN_POST_ACCESS_KEY) !=null ) + executePlainPostObject (request, response); + // TODO - Having implemented the request, now provide an informative HTML page response + else + executePostObject(request, response); + } + else throw new IllegalArgumentException( "Unsupported method in REST 
request"); + } + + + private void executeCopyObject(HttpServletRequest request, HttpServletResponse response, String copy) + throws IOException, XMLStreamException + { + S3CopyObjectRequest engineRequest = new S3CopyObjectRequest(); + String versionId = null; + + String bucketName = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY); + String key = (String)request.getAttribute(S3Constants.OBJECT_ATTR_KEY); + String sourceBucketName = null; + String sourceKey = null; + + // [A] Parse the x-amz-copy-source header into usable pieces + // Check to find a ?versionId= value if any + int index = copy.indexOf( '?' ); + if (-1 != index) + { + versionId = copy.substring( index+1 ); + if (versionId.startsWith( "versionId=" )) engineRequest.setVersion( versionId.substring( 10 )); + copy = copy.substring( 0, index ); + } + + // The value of copy should look like: "bucket-name/object-name" + index = copy.indexOf( '/' ); + + // In case it looks like "/bucket-name/object-name" discard a leading '/' if it exists + if ( 0 == index ) + { + copy = copy.substring(1); + index = copy.indexOf( '/' ); + } + + if ( -1 == index ) + throw new IllegalArgumentException( "Invalid x-amz-copy-source header value [" + copy + "]" ); + + sourceBucketName = copy.substring( 0, index ); + sourceKey = copy.substring( index+1 ); + + + // [B] Set the object used in the SOAP request so it can do the bulk of the work for us + engineRequest.setSourceBucketName( sourceBucketName ); + engineRequest.setSourceKey( sourceKey ); + engineRequest.setDestinationBucketName( bucketName ); + engineRequest.setDestinationKey( key ); + + engineRequest.setDataDirective( request.getHeader( "x-amz-metadata-directive" )); + engineRequest.setMetaEntries( extractMetaData( request )); + engineRequest.setCannedAccess( request.getHeader( "x-amz-acl" )); + engineRequest.setConditions( conditionalRequest( request, true )); + + + // [C] Do the actual work and return the result + S3CopyObjectResponse engineResponse = 
ServiceProvider.getInstance().getS3Engine().handleRequest( engineRequest ); + + versionId = engineResponse.getCopyVersion(); + if (null != versionId) response.addHeader( "x-amz-copy-source-version-id", versionId ); + versionId = engineResponse.getPutVersion(); + if (null != versionId) response.addHeader( "x-amz-version-id", versionId ); + + // To allow the copy object result to be serialized via Axiom classes + CopyObjectResponse allBuckets = S3SerializableServiceImplementation.toCopyObjectResponse( engineResponse ); + + OutputStream outputStream = response.getOutputStream(); + response.setStatus(200); + response.setContentType("application/xml"); + // The content-type literally should be "application/xml; charset=UTF-8" + // but any compliant JVM supplies utf-8 by default; + + MTOMAwareResultStreamWriter resultWriter = new MTOMAwareResultStreamWriter ("CopyObjectResult", outputStream ); + resultWriter.startWrite(); + resultWriter.writeout(allBuckets); + resultWriter.stopWrite(); + + } + + private void executeGetObjectAcl(HttpServletRequest request, HttpServletResponse response) throws IOException, XMLStreamException + { + String bucketName = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY); + String key = (String)request.getAttribute(S3Constants.OBJECT_ATTR_KEY); + + S3GetObjectAccessControlPolicyRequest engineRequest = new S3GetObjectAccessControlPolicyRequest(); + engineRequest.setBucketName( bucketName ); + engineRequest.setKey( key ); + + // -> is this a request for a specific version of the object? 
look for "versionId=" in the query string + String queryString = request.getQueryString(); + if (null != queryString) engineRequest.setVersion( returnParameter( queryString, "versionId=" )); + + S3AccessControlPolicy engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest); + int resultCode = engineResponse.getResultCode(); + if (200 != resultCode) { + response.setStatus( resultCode ); + return; + } + String version = engineResponse.getVersion(); + if (null != version) response.addHeader( "x-amz-version-id", version ); + + + // To allow the get object acl policy result to be serialized via Axiom classes + GetObjectAccessControlPolicyResponse onePolicy = S3SerializableServiceImplementation.toGetObjectAccessControlPolicyResponse( engineResponse ); + + OutputStream outputStream = response.getOutputStream(); + response.setStatus(200); + response.setContentType("application/xml"); + // The content-type literally should be "application/xml; charset=UTF-8" + // but any compliant JVM supplies utf-8 by default; + + MTOMAwareResultStreamWriter resultWriter = new MTOMAwareResultStreamWriter ("GetObjectAccessControlPolicyResult", outputStream ); + resultWriter.startWrite(); + resultWriter.writeout(onePolicy); + resultWriter.stopWrite(); + } + + private void executePutObjectAcl(HttpServletRequest request, HttpServletResponse response) throws IOException + { + // [A] Determine that there is an applicable bucket which might have an ACL set + + String bucketName = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY); + String key = (String)request.getAttribute(S3Constants.OBJECT_ATTR_KEY); + + SBucketDao bucketDao = new SBucketDao(); + SBucket bucket = bucketDao.getByName( bucketName ); + String owner = null; + if ( null != bucket ) + owner = bucket.getOwnerCanonicalId(); + if (null == owner) + { + logger.error( "ACL update failed since " + bucketName + " does not exist" ); + throw new IOException("ACL update failed"); + } + if (null == key) + 
{ + logger.error( "ACL update failed since " + bucketName + " does not contain the expected key" ); + throw new IOException("ACL update failed"); + } + + // [B] Obtain the grant request which applies to the acl request string. This latter is supplied as the value of the x-amz-acl header. + + S3SetObjectAccessControlPolicyRequest engineRequest = new S3SetObjectAccessControlPolicyRequest(); + S3Grant grantRequest = new S3Grant(); + S3AccessControlList aclRequest = new S3AccessControlList(); + + String aclRequestString = request.getHeader("x-amz-acl"); + OrderedPair accessControlsForObjectOwner = SAcl.getCannedAccessControls(aclRequestString,"SObject"); + grantRequest.setPermission(accessControlsForObjectOwner.getFirst()); + grantRequest.setGrantee(accessControlsForObjectOwner.getSecond()); + grantRequest.setCanonicalUserID(owner); + aclRequest.addGrant(grantRequest); + engineRequest.setAcl(aclRequest); + engineRequest.setBucketName(bucketName); + engineRequest.setKey(key); + + + // [C] Allow an S3Engine to handle the S3SetObjectAccessControlPolicyRequest + S3Response engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest); + response.setStatus( engineResponse.getResultCode()); + + } + + private void executeGetObject(HttpServletRequest request, HttpServletResponse response) throws IOException + { + String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY); + String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY); + + + S3GetObjectRequest engineRequest = new S3GetObjectRequest(); + engineRequest.setBucketName(bucket); + engineRequest.setKey(key); + engineRequest.setInlineData(true); + engineRequest.setReturnData(true); + //engineRequest.setReturnMetadata(true); + engineRequest = setRequestByteRange( request, engineRequest ); + + // -> is this a request for a specific version of the object? 
look for "versionId=" in the query string + String queryString = request.getQueryString(); + if (null != queryString) engineRequest.setVersion( returnParameter( queryString, "versionId=" )); + + S3GetObjectResponse engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest( engineRequest ); + response.setStatus( engineResponse.getResultCode()); + + String deleteMarker = engineResponse.getDeleteMarker(); + if ( null != deleteMarker ) { + response.addHeader( "x-amz-delete-marker", "true" ); + response.addHeader( "x-amz-version-id", deleteMarker ); + } + else { + String version = engineResponse.getVersion(); + if (null != version) response.addHeader( "x-amz-version-id", version ); + } + + // -> was the get conditional? + if (!conditionPassed( request, response, engineResponse.getLastModified().getTime(), engineResponse.getETag())) + return; + + + // -> is there data to return + // -> from the Amazon REST documentation it appears that Meta data is only returned as part of a HEAD request + //returnMetaData( engineResponse, response ); + + DataHandler dataHandler = engineResponse.getData(); + if (dataHandler != null) { + response.addHeader("ETag", "\"" + engineResponse.getETag() + "\""); + response.addHeader("Last-Modified", DateHelper.getDateDisplayString( + DateHelper.GMT_TIMEZONE, engineResponse.getLastModified().getTime(), "E, d MMM yyyy HH:mm:ss z")); + + response.setContentLength((int)engineResponse.getContentLength()); + S3RestServlet.writeResponse(response, dataHandler.getInputStream()); + } + } + + private void executePutObject(HttpServletRequest request, HttpServletResponse response) throws IOException + { + String continueHeader = request.getHeader( "Expect" ); + if (continueHeader != null && continueHeader.equalsIgnoreCase("100-continue")) { + S3RestServlet.writeResponse(response, "HTTP/1.1 100 Continue\r\n"); + } + + long contentLength = Converter.toLong(request.getHeader("Content-Length"), 0); + + String bucket = (String) 
request.getAttribute(S3Constants.BUCKET_ATTR_KEY); + String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY); + S3PutObjectInlineRequest engineRequest = new S3PutObjectInlineRequest(); + engineRequest.setBucketName(bucket); + engineRequest.setKey(key); + engineRequest.setContentLength(contentLength); + engineRequest.setMetaEntries( extractMetaData( request )); + engineRequest.setCannedAccess( request.getHeader( "x-amz-acl" )); + + DataHandler dataHandler = new DataHandler(new ServletRequestDataSource(request)); + engineRequest.setData(dataHandler); + + S3PutObjectInlineResponse engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest); + response.setHeader("ETag", "\"" + engineResponse.getETag() + "\""); + String version = engineResponse.getVersion(); + if (null != version) response.addHeader( "x-amz-version-id", version ); + } + + /** + * Once versioining is turned on then to delete an object requires specifying a version + * parameter. A deletion marker is set once versioning is turned on in a bucket. + */ + private void executeDeleteObject(HttpServletRequest request, HttpServletResponse response) throws IOException + { + String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY); + String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY); + + S3DeleteObjectRequest engineRequest = new S3DeleteObjectRequest(); + engineRequest.setBucketName(bucket); + engineRequest.setKey(key); + + // -> is this a request for a specific version of the object? 
look for "versionId=" in the query string + String queryString = request.getQueryString(); + if (null != queryString) engineRequest.setVersion( returnParameter( queryString, "versionId=" )); + + S3Response engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest( engineRequest ); + + response.setStatus( engineResponse.getResultCode()); + String version = engineRequest.getVersion(); + if (null != version) response.addHeader( "x-amz-version-id", version ); + } + + /* + * The purpose of a plain POST operation is to add an object to a specified bucket using HTML forms. + * The capability is for developer and tester convenience providing a simple browser-based upload + * feature as an alternative to using PUTs. + * In the case of PUTs the upload information is passed through HTTP headers. However in the case of a + * POST this information must be supplied as form fields. Many of these are mandatory or otherwise + * the POST request will be rejected. + * The requester using the HTML page must submit valid credentials sufficient for checking that + * the bucket to which the object is to be added has WRITE permission for that user. The AWS access + * key field on the form is taken to be synonymous with the user canonical ID for this purpose. 
+ */ + private void executePlainPostObject(HttpServletRequest request, HttpServletResponse response) throws IOException + { + String continueHeader = request.getHeader( "Expect" ); + if (continueHeader != null && continueHeader.equalsIgnoreCase("100-continue")) { + S3RestServlet.writeResponse(response, "HTTP/1.1 100 Continue\r\n"); + } + + long contentLength = Converter.toLong(request.getHeader("Content-Length"), 0); + + String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY); + String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY); + String accessKey = (String) request.getAttribute(S3Constants.PLAIN_POST_ACCESS_KEY); + String signature = (String) request.getAttribute(S3Constants.PLAIN_POST_SIGNATURE); + S3Grant grant = new S3Grant(); + grant.setCanonicalUserID(accessKey); + grant.setGrantee(SAcl.GRANTEE_USER); + grant.setPermission(SAcl.PERMISSION_FULL); + S3AccessControlList acl = new S3AccessControlList(); + acl.addGrant(grant); + S3PutObjectInlineRequest engineRequest = new S3PutObjectInlineRequest(); + engineRequest.setBucketName(bucket); + engineRequest.setKey(key); + engineRequest.setAcl(acl); + engineRequest.setContentLength(contentLength); + engineRequest.setMetaEntries( extractMetaData( request )); + engineRequest.setCannedAccess( request.getHeader( "x-amz-acl" )); + + DataHandler dataHandler = new DataHandler(new ServletRequestDataSource(request)); + engineRequest.setData(dataHandler); + + S3PutObjectInlineResponse engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest); + response.setHeader("ETag", "\"" + engineResponse.getETag() + "\""); + String version = engineResponse.getVersion(); + if (null != version) response.addHeader( "x-amz-version-id", version ); + } + + + private void executeHeadObject(HttpServletRequest request, HttpServletResponse response) throws IOException + { + String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY); + String key = (String) 
request.getAttribute(S3Constants.OBJECT_ATTR_KEY); + + S3GetObjectRequest engineRequest = new S3GetObjectRequest(); + engineRequest.setBucketName(bucket); + engineRequest.setKey(key); + engineRequest.setInlineData(true); // -> need to set so we get ETag etc returned + engineRequest.setReturnData(true); + engineRequest.setReturnMetadata(true); + engineRequest = setRequestByteRange( request, engineRequest ); + + // -> is this a request for a specific version of the object? look for "versionId=" in the query string + String queryString = request.getQueryString(); + if (null != queryString) engineRequest.setVersion( returnParameter( queryString, "versionId=" )); + + S3GetObjectResponse engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest( engineRequest ); + response.setStatus( engineResponse.getResultCode()); + + String deleteMarker = engineResponse.getDeleteMarker(); + if ( null != deleteMarker ) { + response.addHeader( "x-amz-delete-marker", "true" ); + response.addHeader( "x-amz-version-id", deleteMarker ); + } + else { + String version = engineResponse.getVersion(); + if (null != version) response.addHeader( "x-amz-version-id", version ); + } + + // -> was the head request conditional? 
+ if (!conditionPassed( request, response, engineResponse.getLastModified().getTime(), engineResponse.getETag())) + return; + + + // -> for a head request we return everything except the data + returnMetaData( engineResponse, response ); + + DataHandler dataHandler = engineResponse.getData(); + if (dataHandler != null) { + response.addHeader("ETag", "\"" + engineResponse.getETag() + "\""); + response.addHeader("Last-Modified", DateHelper.getDateDisplayString( + DateHelper.GMT_TIMEZONE, engineResponse.getLastModified().getTime(), "E, d MMM yyyy HH:mm:ss z")); + + response.setContentLength((int)engineResponse.getContentLength()); + } + } + + // There is a problem with POST since the 'Signature' and 'AccessKey' parameters are not + // determined until we hit this function (i.e., they are encoded in the body of the message + // they are not HTTP request headers). All the values we used to get in the request headers + // are not encoded in the request body. + // + // add ETag header computed as Base64 MD5 whenever object is uploaded or updated + // + private void executePostObject( HttpServletRequest request, HttpServletResponse response ) throws IOException + { + String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY); + String contentType = request.getHeader( "Content-Type" ); + int boundaryIndex = contentType.indexOf( "boundary=" ); + String boundary = "--" + (contentType.substring( boundaryIndex + 9 )); + String lastBoundary = boundary + "--"; + + InputStreamReader isr = new InputStreamReader( request.getInputStream()); + BufferedReader br = new BufferedReader( isr ); + + StringBuffer temp = new StringBuffer(); + String oneLine = null; + String name = null; + String value = null; + String metaName = null; // -> after stripped off the x-amz-meta- + boolean isMetaTag = false; + int countMeta = 0; + int state = 0; + + // [A] First parse all the parts out of the POST request and message body + // -> bucket name is still encoded in a Host header + 
S3AuthParams params = new S3AuthParams(); + List metaSet = new ArrayList(); + S3PutObjectInlineRequest engineRequest = new S3PutObjectInlineRequest(); + engineRequest.setBucketName( bucket ); + + // -> the last body part contains the content that is used to write the S3 object, all + // other body parts are header values + while( null != (oneLine = br.readLine())) + { + if ( oneLine.startsWith( lastBoundary )) + { + // -> this is the data of the object to put + if (0 < temp.length()) + { + value = temp.toString(); + temp.setLength( 0 ); + + engineRequest.setContentLength( value.length()); + engineRequest.setDataAsString( value ); + } + break; + } + else if ( oneLine.startsWith( boundary )) + { + // -> this is the header data + if (0 < temp.length()) + { + value = temp.toString().trim(); + temp.setLength( 0 ); + //System.out.println( "param: " + name + " = " + value ); + + if (name.equalsIgnoreCase( "key" )) { + engineRequest.setKey( value ); + } + else if (name.equalsIgnoreCase( "x-amz-acl" )) { + engineRequest.setCannedAccess( value ); + } + else if (isMetaTag) { + S3MetaDataEntry oneMeta = new S3MetaDataEntry(); + oneMeta.setName( metaName ); + oneMeta.setValue( value ); + metaSet.add( oneMeta ); + countMeta++; + metaName = null; + } + + // -> build up the headers so we can do authentication on this POST + HeaderParam oneHeader = new HeaderParam(); + oneHeader.setName( name ); + oneHeader.setValue( value ); + params.addHeader( oneHeader ); + } + state = 1; + } + else if (1 == state && 0 == oneLine.length()) + { + // -> data of a body part starts here + state = 2; + } + else if (1 == state) + { + // -> the name of the 'name-value' pair is encoded in the Content-Disposition header + if (oneLine.startsWith( "Content-Disposition: form-data;")) + { + isMetaTag = false; + int nameOffset = oneLine.indexOf( "name=" ); + if (-1 != nameOffset) + { + name = oneLine.substring( nameOffset+5 ); + if (name.startsWith( "\"" )) name = name.substring( 1 ); + if (name.endsWith( 
"\"" )) name = name.substring( 0, name.length()-1 ); + name = name.trim(); + + if (name.startsWith( "x-amz-meta-" )) { + metaName = name.substring( 11 ); + isMetaTag = true; + } + } + } + } + else if (2 == state) + { + // -> the body parts data may take up multiple lines + //System.out.println( oneLine.length() + " body data: " + oneLine ); + temp.append( oneLine ); + } +// else System.out.println( oneLine.length() + " preamble: " + oneLine ); + } + + + // [B] Authenticate the POST request after we have all the headers + try { + S3RestServlet.authenticateRequest( request, params ); + } + catch( Exception e ) { + throw new IOException( e.toString()); + } + + // [C] Perform the request + if (0 < countMeta) engineRequest.setMetaEntries( metaSet.toArray(new S3MetaDataEntry[0])); + S3PutObjectInlineResponse engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest( engineRequest ); + response.setHeader("ETag", "\"" + engineResponse.getETag() + "\""); + String version = engineResponse.getVersion(); + if (null != version) response.addHeader( "x-amz-version-id", version ); + } + + /** + * Save all the information about the multipart upload request in the database so once it is finished + * (in the future) we can create the real S3 object. 
+ * + * @throws IOException + */ + private void executeInitiateMultipartUpload( HttpServletRequest request, HttpServletResponse response ) throws IOException + { + // This request is via a POST which typically has its auth parameters inside the message + try { + S3RestServlet.authenticateRequest( request, S3RestServlet.extractRequestHeaders( request )); + } + catch( Exception e ) { + throw new IOException( e.toString()); + } + + String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY); + String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY); + String cannedAccess = request.getHeader( "x-amz-acl" ); + S3MetaDataEntry[] meta = extractMetaData( request ); + + // -> the S3 engine has easy access to all the privileged checking code + S3PutObjectInlineRequest engineRequest = new S3PutObjectInlineRequest(); + engineRequest.setBucketName(bucket); + engineRequest.setKey(key); + engineRequest.setCannedAccess( cannedAccess ); + engineRequest.setMetaEntries( meta ); + S3PutObjectInlineResponse engineResponse = ServiceProvider.getInstance().getS3Engine().initiateMultipartUpload( engineRequest ); + int result = engineResponse.getResultCode(); + response.setStatus( result ); + if (200 != result) return; + + // -> there is no SOAP version of this function + StringBuffer xml = new StringBuffer(); + xml.append( "" ); + xml.append( "" ); + xml.append( "" ).append( bucket ).append( "" ); + xml.append( "" ).append( key ).append( "" ); + xml.append( "" ).append( engineResponse.getUploadId()).append( "" ); + xml.append( "" ); + + response.setContentType("text/xml; charset=UTF-8"); + S3RestServlet.endResponse(response, xml.toString()); + } + + private void executeUploadPart( HttpServletRequest request, HttpServletResponse response ) throws IOException + { + String continueHeader = request.getHeader( "Expect" ); + if (continueHeader != null && continueHeader.equalsIgnoreCase("100-continue")) { + S3RestServlet.writeResponse(response, "HTTP/1.1 100 
Continue\r\n"); + } + + String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY); + String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY); + int partNumber = -1; + int uploadId = -1; + + long contentLength = Converter.toLong(request.getHeader("Content-Length"), 0); + + String temp = request.getParameter("uploadId"); + if (null != temp) uploadId = Integer.parseInt( temp ); + + temp = request.getParameter("partNumber"); + if (null != temp) partNumber = Integer.parseInt( temp ); + if (partNumber < 1 || partNumber > 10000) { + logger.error("uploadPart invalid part number " + partNumber ); + response.setStatus(416); + return; + } + + // -> verification + try { + MultipartLoadDao uploadDao = new MultipartLoadDao(); + if (null == uploadDao.multipartExits( uploadId )) { + response.setStatus(404); + return; + } + + // -> another requirement is that only the upload initiator can upload parts + String initiator = uploadDao.getInitiator( uploadId ); + if (null == initiator || !initiator.equals( UserContext.current().getAccessKey())) { + response.setStatus(403); + return; + } + } + catch( Exception e ) { + logger.error("executeUploadPart failed due to " + e.getMessage(), e); + response.setStatus(500); + return; + } + + S3PutObjectInlineRequest engineRequest = new S3PutObjectInlineRequest(); + engineRequest.setBucketName(bucket); + engineRequest.setKey(key); + engineRequest.setContentLength(contentLength); + DataHandler dataHandler = new DataHandler(new ServletRequestDataSource(request)); + engineRequest.setData(dataHandler); + + S3PutObjectInlineResponse engineResponse = ServiceProvider.getInstance().getS3Engine().saveUploadPart( engineRequest, uploadId, partNumber ); + if (null != engineResponse.getETag()) response.setHeader("ETag", "\"" + engineResponse.getETag() + "\""); + response.setStatus(engineResponse.getResultCode()); + } + + /** + * This function is required to both parsing XML on the request and return XML as part of its result. 
+ * + * @param request + * @param response + * @throws IOException + */ + private void executeCompleteMultipartUpload( HttpServletRequest request, HttpServletResponse response ) throws IOException + { + // [A] This request is via a POST which typically has its auth parameters inside the message + try { + S3RestServlet.authenticateRequest( request, S3RestServlet.extractRequestHeaders( request )); + } + catch( Exception e ) { + throw new IOException( e.toString()); + } + + String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY); + String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY); + S3MultipartPart[] parts = null; + S3MetaDataEntry[] meta = null; + String cannedAccess = null; + int uploadId = -1; + + // AWS S3 specifies that the keep alive connection is by sending whitespace characters until done + // Therefore the XML version prolog is prepended to the stream in advance + OutputStream outputStream = response.getOutputStream(); + outputStream.write("".getBytes()); + + String temp = request.getParameter("uploadId"); + if (null != temp) uploadId = Integer.parseInt( temp ); + + + // [B] Look up all the uploaded body parts and related info + try { + MultipartLoadDao uploadDao = new MultipartLoadDao(); + if (null == uploadDao.multipartExits( uploadId )) { + response.setStatus(404); + returnErrorXML( 404, "NotFound", outputStream ); + return; + } + + // -> another requirement is that only the upload initiator can upload parts + String initiator = uploadDao.getInitiator( uploadId ); + if (null == initiator || !initiator.equals( UserContext.current().getAccessKey())) { + response.setStatus(403); + returnErrorXML( 403, "Forbidden", outputStream ); + return; + } + + parts = uploadDao.getParts( uploadId, 10000, 0 ); + meta = uploadDao.getMeta( uploadId ); + cannedAccess = uploadDao.getCannedAccess( uploadId ); + } + catch( Exception e ) { + logger.error("executeCompleteMultipartUpload failed due to " + e.getMessage(), e); + 
response.setStatus(500); + returnErrorXML( 500, "InternalError", outputStream ); + return; + } + + + // [C] Parse the given XML body part and perform error checking + OrderedPair match = verifyParts( request.getInputStream(), parts ); + if (200 != match.getFirst().intValue()) { + response.setStatus(match.getFirst().intValue()); + returnErrorXML( match.getFirst().intValue(), match.getSecond(), outputStream ); + return; + } + + + // [D] Ask the engine to create a newly re-constituted object + S3PutObjectInlineRequest engineRequest = new S3PutObjectInlineRequest(); + engineRequest.setBucketName(bucket); + engineRequest.setKey(key); + engineRequest.setMetaEntries(meta); + engineRequest.setCannedAccess(cannedAccess); + + S3PutObjectInlineResponse engineResponse = ServiceProvider.getInstance().getS3Engine().concatentateMultipartUploads( response, engineRequest, parts, outputStream ); + int result = engineResponse.getResultCode(); + // -> free all multipart state since we now have one concatentated object + if (200 == result) ServiceProvider.getInstance().getS3Engine().freeUploadParts( bucket, uploadId, false ); + + // If all successful then clean up all left over parts + // Notice that "" has already been written into the servlet output stream at the beginning of section [A] + if ( 200 == result ) + { + StringBuffer xml = new StringBuffer(); + xml.append( "" ); + xml.append( "" ).append( "http://" + bucket + ".s3.amazonaws.com/" + key ).append( "" ); + xml.append( "" ).append( bucket ).append( "" ); + xml.append( "" ).append( key ).append( "" ); + xml.append( "\"" ).append( engineResponse.getETag()).append( "\"" ); + xml.append( "" ); + String xmlString = xml.toString().replaceAll("^\\s+", ""); // Remove leading whitespace characters + outputStream.write( xmlString.getBytes()); + outputStream.close(); + } + else returnErrorXML( result, null, outputStream ); + } + + private void executeAbortMultipartUpload( HttpServletRequest request, HttpServletResponse response ) throws 
IOException + { + String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY); + int uploadId = -1; + + String temp = request.getParameter("uploadId"); + if (null != temp) uploadId = Integer.parseInt( temp ); + + int result = ServiceProvider.getInstance().getS3Engine().freeUploadParts( bucket, uploadId, true ); + response.setStatus( result ); + } + + private void executeListUploadParts( HttpServletRequest request, HttpServletResponse response ) throws IOException + { + String bucketName = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY); + String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY); + String owner = null; + String initiator = null; + S3MultipartPart[] parts = null; + int remaining = 0; + int uploadId = -1; + int maxParts = 1000; + int partMarker = 0; + int nextMarker = 0; + + String temp = request.getParameter("uploadId"); + if (null != temp) uploadId = Integer.parseInt( temp ); + + temp = request.getParameter("max-parts"); + if (null != temp) { + maxParts = Integer.parseInt( temp ); + if (maxParts > 1000 || maxParts < 0) maxParts = 1000; + } + + temp = request.getParameter("part-number-marker"); + if (null != temp) partMarker = Integer.parseInt( temp ); + + + // -> does the bucket exist, we may need it to verify access permissions + SBucketDao bucketDao = new SBucketDao(); + SBucket bucket = bucketDao.getByName(bucketName); + if (bucket == null) { + logger.error( "listUploadParts failed since " + bucketName + " does not exist" ); + response.setStatus(404); + return; + } + + try { + MultipartLoadDao uploadDao = new MultipartLoadDao(); + OrderedPair exists = uploadDao.multipartExits( uploadId ); + if (null == exists) { + response.setStatus(404); + return; + } + owner = exists.getFirst(); + + // -> the multipart initiator or bucket owner can do this action + initiator = uploadDao.getInitiator( uploadId ); + if (null == initiator || !initiator.equals( UserContext.current().getAccessKey())) + { + try { + // -> write 
permission on a bucket allows a PutObject / DeleteObject action on any object in the bucket + S3PolicyContext context = new S3PolicyContext( PolicyActions.ListMultipartUploadParts, bucketName ); + context.setKeyName( exists.getSecond()); + S3Engine.verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE ); + } + catch (PermissionDeniedException e) { + response.setStatus(403); + return; + } + } + + parts = uploadDao.getParts( uploadId, maxParts, partMarker ); + remaining = uploadDao.numParts( uploadId, partMarker+maxParts ); + } + catch( Exception e ) { + logger.error("List Uploads failed due to " + e.getMessage(), e); + response.setStatus(500); + } + + + StringBuffer xml = new StringBuffer(); + xml.append( "" ); + xml.append( "" ); + xml.append( "" ).append( bucket ).append( "" ); + xml.append( "" ).append( key ).append( "" ); + xml.append( "" ).append( uploadId ).append( "" ); + + // -> currently we just have the access key and have no notion of a display name + xml.append( "" ); + xml.append( "" ).append( initiator ).append( "" ); + xml.append( "" ); + xml.append( "" ); + xml.append( "" ); + xml.append( "" ).append( owner ).append( "" ); + xml.append( "" ); + xml.append( "" ); + + StringBuffer partsList = new StringBuffer(); + for( int i=0; i < parts.length; i++ ) + { + S3MultipartPart onePart = parts[i]; + if (null == onePart) break; + + nextMarker = onePart.getPartNumber(); + partsList.append( "" ); + partsList.append( "" ).append( nextMarker ).append( "" ); + partsList.append( "" ).append( DatatypeConverter.printDateTime( onePart.getLastModified())).append( "" ); + partsList.append( "\"" ).append( onePart.getETag()).append( "\"" ); + partsList.append( "" ).append( onePart.getSize()).append( "" ); + partsList.append( "" ); + } + + xml.append( "STANDARD" ); + xml.append( "" ).append( partMarker ).append( "" ); + xml.append( "" ).append( nextMarker ).append( "" ); + xml.append( "" ).append( maxParts ).append( "" ); + xml.append( "" ).append((0 < 
remaining ? "true" : "false" )).append( "" ); + + xml.append( partsList.toString()); + xml.append( "" ); + + response.setStatus(200); + response.setContentType("text/xml; charset=UTF-8"); + S3RestServlet.endResponse(response, xml.toString()); + } + + /** + * Support the "Range: bytes=0-399" header with just one byte range. + * @param request + * @param engineRequest + * @return + */ + private S3GetObjectRequest setRequestByteRange( HttpServletRequest request, S3GetObjectRequest engineRequest ) + { + String temp = request.getHeader( "Range" ); + if (null == temp) return engineRequest; + + int offset = temp.indexOf( "=" ); + if (-1 != offset) + { + String range = temp.substring( offset+1 ); + + String[] parts = range.split( "-" ); + if (2 >= parts.length) { + // -> the end byte is inclusive + engineRequest.setByteRangeStart( Long.parseLong(parts[0])); + engineRequest.setByteRangeEnd( Long.parseLong(parts[1])+1); + } + } + return engineRequest; + } + + private S3ConditionalHeaders conditionalRequest( HttpServletRequest request, boolean isCopy ) + { + S3ConditionalHeaders headers = new S3ConditionalHeaders(); + + if (isCopy) { + headers.setModifiedSince( request.getHeader( "x-amz-copy-source-if-modified-since" )); + headers.setUnModifiedSince( request.getHeader( "x-amz-copy-source-if-unmodified-since" )); + headers.setMatch( request.getHeader( "x-amz-copy-source-if-match" )); + headers.setNoneMatch( request.getHeader( "x-amz-copy-source-if-none-match" )); + } + else { + headers.setModifiedSince( request.getHeader( "If-Modified-Since" )); + headers.setUnModifiedSince( request.getHeader( "If-Unmodified-Since" )); + headers.setMatch( request.getHeader( "If-Match" )); + headers.setNoneMatch( request.getHeader( "If-None-Match" )); + } + return headers; + } + + private boolean conditionPassed( HttpServletRequest request, HttpServletResponse response, Date lastModified, String ETag ) + { + S3ConditionalHeaders ifCond = conditionalRequest( request, false ); + + if (0 > 
ifCond.ifModifiedSince( lastModified )) { + response.setStatus( 304 ); + return false; + } + if (0 > ifCond.ifUnmodifiedSince( lastModified )) { + response.setStatus( 412 ); + return false; + } + if (0 > ifCond.ifMatchEtag( ETag )) { + response.setStatus( 412 ); + return false; + } + if (0 > ifCond.ifNoneMatchEtag( ETag )) { + response.setStatus( 412 ); + return false; + } + return true; + } + + /** + * Return the saved object's meta data back to the client as HTTP "x-amz-meta-" headers. + * This function is constructing an HTTP header and these headers have a defined syntax + * as defined in rfc2616. Any characters that could cause an invalid HTTP header will + * prevent that meta data from being returned via the REST call (as is defined in the Amazon + * spec). These characters can be defined if using the SOAP API as well as the REST API. + * + * @param engineResponse + * @param response + */ + private void returnMetaData( S3GetObjectResponse engineResponse, HttpServletResponse response ) + { + boolean ignoreMeta = false; + int ignoredCount = 0; + + S3MetaDataEntry[] metaSet = engineResponse.getMetaEntries(); + for( int i=0; null != metaSet && i < metaSet.length; i++ ) + { + String name = metaSet[i].getName(); + String value = metaSet[i].getValue(); + byte[] nameBytes = name.getBytes(); + ignoreMeta = false; + + // -> cannot have control characters (octets 0 - 31) and DEL (127), in an HTTP header + for( int j=0; j < name.length(); j++ ) { + if ((0 <= nameBytes[j] && 31 >= nameBytes[j]) || 127 == nameBytes[j]) { + ignoreMeta = true; + break; + } + } + + // -> cannot have HTTP separators in an HTTP header + if (-1 != name.indexOf('(') || -1 != name.indexOf(')') || -1 != name.indexOf('@') || + -1 != name.indexOf('<') || -1 != name.indexOf('>') || -1 != name.indexOf('\"') || + -1 != name.indexOf('[') || -1 != name.indexOf(']') || -1 != name.indexOf('=') || + -1 != name.indexOf(',') || -1 != name.indexOf(';') || -1 != name.indexOf(':') || + -1 != name.indexOf('\\') || 
-1 != name.indexOf('/') || -1 != name.indexOf(' ') || + -1 != name.indexOf('{') || -1 != name.indexOf('}') || -1 != name.indexOf('?') || + -1 != name.indexOf('\t') + ) ignoreMeta = true; + + + if ( ignoreMeta ) + ignoredCount++; + else response.addHeader( "x-amz-meta-" + name, value ); + } + + if (0 < ignoredCount) response.addHeader( "x-amz-missing-meta", new String( "" + ignoredCount )); + } + + /** + * Extract the name and value of all meta data so it can be written with the + * object that is being 'PUT'. + * + * @param request + * @return + */ + private S3MetaDataEntry[] extractMetaData( HttpServletRequest request ) + { + List metaSet = new ArrayList(); + int count = 0; + + Enumeration headers = request.getHeaderNames(); + while( headers.hasMoreElements()) + { + String key = (String)headers.nextElement(); + if (key.startsWith( "x-amz-meta-" )) + { + String name = key.substring( 11 ); + String value = request.getHeader( key ); + if (null != value) { + S3MetaDataEntry oneMeta = new S3MetaDataEntry(); + oneMeta.setName( name ); + oneMeta.setValue( value ); + metaSet.add( oneMeta ); + count++; + } + } + } + + if ( 0 < count ) + return metaSet.toArray(new S3MetaDataEntry[0]); + else return null; + } + + /** + * Parameters on the query string may or may not be name-value pairs. + * For example: "?acl&versionId=2", notice that "acl" has no value other + * than it is present. 
+ * + * @param queryString - from a URL to locate the 'find' parameter + * @param find - name string to return first found + * @return the value matching the found name + */ + private String returnParameter( String queryString, String find ) + { + int offset = queryString.indexOf( find ); + if (-1 != offset) + { + String temp = queryString.substring( offset ); + String[] paramList = temp.split( "[&=]" ); + if (null != paramList && 2 <= paramList.length) return paramList[1]; + } + return null; + } + + private void returnErrorXML( int errorCode, String errorDescription, OutputStream os ) throws IOException + { + StringBuffer xml = new StringBuffer(); + + xml.append( "" ); + xml.append( "" ); + + if ( null != errorDescription ) + xml.append( "" ).append( errorDescription ).append( "" ); + else xml.append( "" ).append( errorCode ).append( "" ); + + xml.append( "" ).append( "" ).append( "" ); + xml.append( "" ).append( "" ).append( "" ); + xml.append( "" ).append( "" ).append( "" ); + xml.append( "" ); + + os.write( xml.toString().getBytes()); + os.close(); + } + + /** + * The Complete Multipart Upload function pass in the request body a list of + * all uploaded body parts. It is required that we verify that list matches + * what was uploaded. 
+ * + * @param is + * @param parts + * @return error code, and error string + * @throws ParserConfigurationException, IOException, SAXException + */ + private OrderedPair verifyParts( InputStream is, S3MultipartPart[] parts ) + { + try { + DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance(); + dbf.setNamespaceAware( true ); + + DocumentBuilder db = dbf.newDocumentBuilder(); + Document doc = db.parse( is ); + Node parent = null; + Node contents = null; + NodeList children = null; + String temp = null; + String element = null; + String eTag = null; + int lastNumber = -1; + int partNumber = -1; + int count = 0; + + // -> handle with and without a namespace + NodeList nodeSet = doc.getElementsByTagNameNS( "http://s3.amazonaws.com/doc/2006-03-01/", "Part" ); + count = nodeSet.getLength(); + if (0 == count) { + nodeSet = doc.getElementsByTagName( "Part" ); + count = nodeSet.getLength(); + } + if (count != parts.length) return new OrderedPair(400, "InvalidPart"); + + // -> get a list of all the children elements of the 'Part' parent element + for( int i=0; i < count; i++ ) + { + partNumber = -1; + eTag = null; + parent = nodeSet.item(i); + + if (null != (children = parent.getChildNodes())) + { + int numChildren = children.getLength(); + for( int j=0; j < numChildren; j++ ) + { + contents = children.item( j ); + element = contents.getNodeName().trim(); + if ( element.endsWith( "PartNumber" )) + { + temp = contents.getFirstChild().getNodeValue(); + if (null != temp) partNumber = Integer.parseInt( temp ); + //System.out.println( "part: " + partNumber ); + } + else if (element.endsWith( "ETag" )) + { + eTag = contents.getFirstChild().getNodeValue(); + //System.out.println( "etag: " + eTag ); + } + } + } + + // -> do the parts given in the call XML match what was previously uploaded? 
+ if (lastNumber >= partNumber) { + return new OrderedPair(400, "InvalidPartOrder"); + } + if (partNumber != parts[i].getPartNumber() || + eTag == null || + !eTag.equalsIgnoreCase( "\"" + parts[i].getETag() + "\"" )) { + return new OrderedPair(400, "InvalidPart"); + } + + lastNumber = partNumber; + } + return new OrderedPair(200, "Success"); + } + catch( Exception e ) { + return new OrderedPair(500, e.toString()); + } + } +} \ No newline at end of file diff --git a/awsapi/src/com/cloud/bridge/service/core/ec2/EC2Engine.java b/awsapi/src/com/cloud/bridge/service/core/ec2/EC2Engine.java index 98859053fb8..2eec930810a 100644 --- a/awsapi/src/com/cloud/bridge/service/core/ec2/EC2Engine.java +++ b/awsapi/src/com/cloud/bridge/service/core/ec2/EC2Engine.java @@ -1,2282 +1,2138 @@ -/* - * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.cloud.bridge.service.core.ec2; - -import java.io.File; -import java.io.FileInputStream; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.io.UnsupportedEncodingException; -import java.security.SignatureException; -import java.sql.SQLException; -import java.text.ParseException; -import java.util.ArrayList; -import java.util.List; -import java.util.Properties; -import java.util.UUID; - -<<<<<<< HEAD -import javax.xml.parsers.ParserConfigurationException; - -import org.apache.log4j.Logger; -import org.xml.sax.SAXException; - -import com.cloud.bridge.persist.dao.CloudStackSvcOfferingDao; -======= -import org.apache.log4j.Logger; - ->>>>>>> 6472e7b... Now really adding the renamed files! -import com.cloud.bridge.persist.dao.OfferingDao; -import com.cloud.bridge.service.UserContext; -import com.cloud.bridge.service.exception.EC2ServiceException; -import com.cloud.bridge.service.exception.EC2ServiceException.ClientError; -import com.cloud.bridge.service.exception.EC2ServiceException.ServerError; -import com.cloud.bridge.util.ConfigurationHelper; -import com.cloud.stack.CloudStackApi; -import com.cloud.stack.models.CloudStackAccount; -import com.cloud.stack.models.CloudStackDiskOffering; -import com.cloud.stack.models.CloudStackInfoResponse; -import com.cloud.stack.models.CloudStackIngressRule; -import com.cloud.stack.models.CloudStackIpAddress; -import com.cloud.stack.models.CloudStackKeyPair; -import com.cloud.stack.models.CloudStackKeyValue; -import com.cloud.stack.models.CloudStackNetwork; -import com.cloud.stack.models.CloudStackNetworkOffering; -import com.cloud.stack.models.CloudStackNic; -import com.cloud.stack.models.CloudStackOsType; -import com.cloud.stack.models.CloudStackPasswordData; -import com.cloud.stack.models.CloudStackResourceLimit; -import com.cloud.stack.models.CloudStackSecurityGroup; -import com.cloud.stack.models.CloudStackSecurityGroupIngress; -<<<<<<< HEAD -import 
com.cloud.stack.models.CloudStackServiceOffering; -======= ->>>>>>> 6472e7b... Now really adding the renamed files! -import com.cloud.stack.models.CloudStackSnapshot; -import com.cloud.stack.models.CloudStackTemplate; -import com.cloud.stack.models.CloudStackUser; -import com.cloud.stack.models.CloudStackUserVm; -import com.cloud.stack.models.CloudStackVolume; -import com.cloud.stack.models.CloudStackZone; - -/** - * EC2Engine processes the ec2 commands and calls their cloudstack analogs - * - */ -public class EC2Engine { - protected final static Logger logger = Logger.getLogger(EC2Engine.class); - String managementServer = null; - String cloudAPIPort = null; - - private CloudStackApi _eng = null; - - private CloudStackAccount currentAccount = null; - - public EC2Engine() throws IOException { - loadConfigValues(); - } - - /** - * Which management server to we talk to? - * Load a mapping form Amazon values for 'instanceType' to cloud defined - * diskOfferingId and serviceOfferingId. - * - * @throws IOException - */ - private void loadConfigValues() throws IOException { - File propertiesFile = ConfigurationHelper.findConfigurationFile("ec2-service.properties"); - if (null != propertiesFile) { - logger.info("Use EC2 properties file: " + propertiesFile.getAbsolutePath()); - Properties EC2Prop = new Properties(); - try { - EC2Prop.load( new FileInputStream( propertiesFile )); - } catch (FileNotFoundException e) { - logger.warn("Unable to open properties file: " + propertiesFile.getAbsolutePath(), e); - } catch (IOException e) { - logger.warn("Unable to read properties file: " + propertiesFile.getAbsolutePath(), e); - } - managementServer = EC2Prop.getProperty( "managementServer" ); - cloudAPIPort = EC2Prop.getProperty( "cloudAPIPort", null ); - - OfferingDao ofDao = new OfferingDao(); - try { - if(ofDao.getOfferingCount() == 0) { - String strValue = EC2Prop.getProperty("m1.small.serviceId"); - if(strValue != null) ofDao.setOfferMapping("m1.small", strValue); - - 
strValue = EC2Prop.getProperty("m1.large.serviceId"); - if(strValue != null) ofDao.setOfferMapping("m1.large", strValue); - - strValue = EC2Prop.getProperty("m1.xlarge.serviceId"); - if(strValue != null) ofDao.setOfferMapping("m1.xlarge", strValue); - - strValue = EC2Prop.getProperty("c1.medium.serviceId"); - if(strValue != null) ofDao.setOfferMapping("c1.medium", strValue); - - strValue = EC2Prop.getProperty("c1.xlarge.serviceId"); - if(strValue != null) ofDao.setOfferMapping("c1.xlarge", strValue); - - strValue = EC2Prop.getProperty("m2.xlarge.serviceId"); - if(strValue != null) ofDao.setOfferMapping("m2.xlarge", strValue); - - strValue = EC2Prop.getProperty("m2.2xlarge.serviceId"); - if(strValue != null) ofDao.setOfferMapping("m2.2xlarge", strValue); - - strValue = EC2Prop.getProperty("m2.4xlarge.serviceId"); - if(strValue != null) ofDao.setOfferMapping("m2.4xlarge", strValue); - - strValue = EC2Prop.getProperty("cc1.4xlarge.serviceId"); - if(strValue != null) ofDao.setOfferMapping("cc1.4xlarge", strValue); - } - } catch(Exception e) { - logger.error("Unexpected exception ", e); - } - } else logger.error( "ec2-service.properties not found" ); - } - - /** - * Helper function to manage the api connection - * - * @return - */ - private CloudStackApi getApi() { - if (_eng == null) { - _eng = new CloudStackApi(managementServer, cloudAPIPort, false); - } - // regardless of whether _eng is initialized, we must make sure - // access/secret keys are current with what's in the UserCredentials - _eng.setApiKey(UserContext.current().getAccessKey()); - _eng.setSecretKey(UserContext.current().getSecretKey()); - return _eng; - } - - - /** - * Verifies account can access CloudStack - * - * @param accessKey - * @param secretKey - * @return - * @throws EC2ServiceException - */ - public boolean validateAccount( String accessKey, String secretKey ) throws EC2ServiceException { - String oldApiKey = null; - String oldSecretKey = null; - - if (accessKey == null || secretKey == null) { 
- return false; - } - - // okay, instead of using the getApi() nonsense for validate, we are going to manage _eng - if (_eng == null) { - _eng = new CloudStackApi(managementServer, cloudAPIPort, false); - } - - try { - oldApiKey = _eng.getApiKey(); - oldSecretKey = _eng.getSecretKey(); - } catch(Exception e) { - // we really don't care, and expect this - } - try { - _eng.setApiKey(accessKey); - _eng.setSecretKey(secretKey); - List accts = _eng.listAccounts(null, null, null, null, null, null, null, null); - if (oldApiKey != null && oldSecretKey != null) { - _eng.setApiKey(oldApiKey); - _eng.setSecretKey(oldSecretKey); - } - if (accts == null) { - return false; - } - return true; - } catch(Exception e) { - logger.error("Validate account failed!"); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); - } - } - - /** - * Creates a security group - * - * @param groupName - * @param groupDesc - * @return - */ - public Boolean createSecurityGroup(String groupName, String groupDesc) { - try { - CloudStackSecurityGroup grp = getApi().createSecurityGroup(groupName, null, groupDesc, null); - if (grp != null && grp.getId() != null) { - return true; - } - return false; - } catch( Exception e ) { - logger.error( "EC2 CreateSecurityGroup - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); - } - } - - /** - * Deletes a security group - * - * @param groupName - * @return - */ - public boolean deleteSecurityGroup(String groupName) { - try { - CloudStackInfoResponse resp = getApi().deleteSecurityGroup(null, null, null, groupName); - if (resp != null) { - return resp.getSuccess(); - } - return false; - } catch( Exception e ) { - logger.error( "EC2 DeleteSecurityGroup - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); - } - } - - /** - * returns a list of security groups - * - * @param request - * @return - */ - public EC2DescribeSecurityGroupsResponse 
describeSecurityGroups(EC2DescribeSecurityGroups request) - { - try { - EC2DescribeSecurityGroupsResponse response = listSecurityGroups( request.getGroupSet()); - EC2GroupFilterSet gfs = request.getFilterSet(); - - if ( null == gfs ) - return response; - else return gfs.evaluate( response ); - } catch( Exception e ) { - logger.error( "EC2 DescribeSecurityGroups - ", e); - throw new EC2ServiceException(ServerError.InternalError, "An unexpected error occurred."); - } - } - - /** - * CloudStack supports revoke only by using the ruleid of the ingress rule. - * We list all security groups and find the matching group and use the first ruleId we find. - * - * @param request - * @return - */ - public boolean revokeSecurityGroup( EC2AuthorizeRevokeSecurityGroup request ) - { - if (null == request.getName()) throw new EC2ServiceException(ServerError.InternalError, "Name is a required parameter"); - try { - String[] groupSet = new String[1]; - groupSet[0] = request.getName(); - String ruleId = null; - - EC2IpPermission[] items = request.getIpPermissionSet(); - - EC2DescribeSecurityGroupsResponse response = listSecurityGroups( groupSet ); - EC2SecurityGroup[] groups = response.getGroupSet(); - - for (EC2SecurityGroup group : groups) { - EC2IpPermission[] perms = group.getIpPermissionSet(); - for (EC2IpPermission perm : perms) { - ruleId = doesRuleMatch( items[0], perm ); - } - } - - if (null == ruleId) - throw new EC2ServiceException(ClientError.InvalidGroup_NotFound, "Cannot find matching ruleid."); - - CloudStackInfoResponse resp = getApi().revokeSecurityGroupIngress(ruleId); - if (resp != null && resp.getId() != null) { - return resp.getSuccess(); - } - return false; - } catch( Exception e ) { - logger.error( "EC2 revokeSecurityGroupIngress" + " - " + e.getMessage()); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); - } - } - - /** - * authorizeSecurityGroup - * - * @param request - ip permission parameters - */ - public boolean 
authorizeSecurityGroup(EC2AuthorizeRevokeSecurityGroup request ) - { - if (null == request.getName()) throw new EC2ServiceException(ServerError.InternalError, "Name is a required parameter"); - - EC2IpPermission[] items = request.getIpPermissionSet(); - - try { - for (EC2IpPermission ipPerm : items) { - EC2SecurityGroup[] groups = ipPerm.getUserSet(); - - List secGroupList = new ArrayList(); - for (EC2SecurityGroup group : groups) { - CloudStackKeyValue pair = new CloudStackKeyValue(); - pair.setKeyValue(group.getAccount(), group.getName()); - secGroupList.add(pair); - } - CloudStackSecurityGroupIngress resp = null; - if (ipPerm.getProtocol().equalsIgnoreCase("icmp")) { - resp = getApi().authorizeSecurityGroupIngress(null, constructCIDRList(ipPerm.getIpRangeSet()), null, null, - ipPerm.getToPort().toString(), ipPerm.getFromPort().toString(), ipPerm.getProtocol(), null, - request.getName(), null, secGroupList); - } else { - resp = getApi().authorizeSecurityGroupIngress(null, constructCIDRList(ipPerm.getIpRangeSet()), null, - ipPerm.getToPort().longValue(), null, null, ipPerm.getProtocol(), null, request.getName(), - ipPerm.getFromPort().longValue(), secGroupList); - } - if (resp != null && resp.getRuleId() != null) { - return true; - } - return false; - } - } catch(Exception e) { - logger.error( "EC2 AuthorizeSecurityGroupIngress - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); - } - return true; - } - - /** - * Does the permission from the request (left) match the permission from the cloudStack query (right). - * If the cloudStack rule matches then we return its ruleId. 
- * - * @param permLeft - * @param permRight - * @return ruleId of the cloudstack rule - */ - private String doesRuleMatch(EC2IpPermission permLeft, EC2IpPermission permRight) - { - int matches = 0; - - if (null != permLeft.getIcmpType() && null != permLeft.getIcmpCode()) { - if (null == permRight.getIcmpType() || null == permRight.getIcmpCode()) return null; - - if (!permLeft.getIcmpType().equalsIgnoreCase( permRight.getIcmpType())) return null; - if (!permLeft.getIcmpCode().equalsIgnoreCase( permRight.getIcmpCode())) return null; - matches++; - } - - // -> "Valid Values for EC2 security groups: tcp | udp | icmp or the corresponding protocol number (6 | 17 | 1)." - if (null != permLeft.getProtocol()) { - if (null == permRight.getProtocol()) return null; - - String protocol = permLeft.getProtocol(); - if (protocol.equals( "6" )) protocol = "tcp"; - else if (protocol.equals( "17" )) protocol = "udp"; - else if (protocol.equals( "1" )) protocol = "icmp"; - - if (!protocol.equalsIgnoreCase( permRight.getProtocol())) return null; - matches++; - } - - - if (null != permLeft.getCIDR()) { - if (null == permRight.getCIDR()) return null; - - if (!permLeft.getCIDR().equalsIgnoreCase( permRight.getCIDR())) return null; - matches++; - } - - // -> is the port(s) from the request (left) a match of the rule's port(s) - if (0 != permLeft.getFromPort()) { - // -> -1 means all ports match - if (-1 != permLeft.getFromPort()) { - if (permLeft.getFromPort().compareTo(permRight.getFromPort()) != 0 || - permLeft.getToPort().compareTo(permRight.getToPort()) != 0) - return null; - } - matches++; - } - - - // -> was permLeft set up properly with at least one property to match? - if ( 0 == matches ) - return null; - else return permRight.getRuleId(); - } - - - /** - * Cloud Stack API takes a comma separated list of IP ranges as one parameter. 
- * - * @throws UnsupportedEncodingException - */ - private String constructCIDRList( String[] ipRanges ) throws UnsupportedEncodingException - { - if (null == ipRanges || 0 == ipRanges.length) return null; - StringBuffer cidrList = new StringBuffer(); - - for( int i=0; i < ipRanges.length; i++ ) { - if (0 < i) cidrList.append( "," ); - cidrList.append( ipRanges[i] ); - } - return cidrList.toString(); - } - - /** - * Returns a list of all snapshots - * - * @param request - * @return - */ - public EC2DescribeSnapshotsResponse handleRequest( EC2DescribeSnapshots request ) - { - EC2DescribeVolumesResponse volumes = new EC2DescribeVolumesResponse(); - EC2SnapshotFilterSet sfs = request.getFilterSet(); - - try { - // -> query to get the volume size for each snapshot - EC2DescribeSnapshotsResponse response = listSnapshots( request.getSnapshotSet()); - if (response == null) { - return new EC2DescribeSnapshotsResponse(); - } - EC2Snapshot[] snapshots = response.getSnapshotSet(); - for (EC2Snapshot snap : snapshots) { - volumes = listVolumes(snap.getVolumeId(), null, volumes); - EC2Volume[] volSet = volumes.getVolumeSet(); - if (0 < volSet.length) snap.setVolumeSize(volSet[0].getSize()); - volumes.reset(); - } - - if ( null == sfs ) - return response; - else return sfs.evaluate( response ); - } catch( EC2ServiceException error ) { - logger.error( "EC2 DescribeSnapshots - ", error); - throw error; - - } catch( Exception e ) { - logger.error( "EC2 DescribeSnapshots - ", e); - throw new EC2ServiceException(ServerError.InternalError, "An unexpected error occurred."); - } - } - - /** - * Creates a snapshot - * - * @param volumeId - * @return - */ - public EC2Snapshot createSnapshot( String volumeId ) { - try { - - CloudStackSnapshot snap = getApi().createSnapshot(volumeId, null, null, null); - if (snap == null) { - throw new EC2ServiceException(ServerError.InternalError, "Unable to create snapshot!"); - } - EC2Snapshot ec2Snapshot = new EC2Snapshot(); - - 
ec2Snapshot.setId(snap.getId()); - ec2Snapshot.setName(snap.getName()); - ec2Snapshot.setType(snap.getSnapshotType()); - ec2Snapshot.setAccountName(snap.getAccountName()); - ec2Snapshot.setDomainId(snap.getDomainId()); - ec2Snapshot.setCreated(snap.getCreated()); - ec2Snapshot.setVolumeId(snap.getVolumeId()); - - List vols = getApi().listVolumes(null, null, null, snap.getVolumeId(), null, null, null, null, null, null, null); - - if(vols.size() > 0) { - assert(vols.get(0).getSize() != null); - Long sizeInGB = vols.get(0).getSize().longValue()/1073741824; - ec2Snapshot.setVolumeSize(sizeInGB); - } - - return ec2Snapshot; - } catch( Exception e ) { - logger.error( "EC2 CreateSnapshot - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); - } - } - - /** - * Deletes a snapshot - * - * @param snapshotId - * @return - */ - public boolean deleteSnapshot(String snapshotId) { - try { - - CloudStackInfoResponse resp = getApi().deleteSnapshot(snapshotId); -<<<<<<< HEAD - if(resp != null) { - return resp.getSuccess(); - } -======= - if(resp.getJobId() != null) - return true; ->>>>>>> 6472e7b... Now really adding the renamed files! - - return false; - } catch(Exception e) { - logger.error( "EC2 DeleteSnapshot - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? 
e.getMessage() : "An unexpected error occurred."); - } - } - - /** - * Modify an existing template - * - * @param request - * @return - */ - public boolean modifyImageAttribute( EC2Image request ) - { - // TODO: This is incomplete - EC2DescribeImagesResponse images = new EC2DescribeImagesResponse(); - - try { - images = listTemplates( request.getId(), images ); - EC2Image[] imageSet = images.getImageSet(); - - CloudStackTemplate resp = getApi().updateTemplate(request.getId(), null, request.getDescription(), null, imageSet[0].getName(), null, null); - if (resp != null) { - return true; - } - return false; - } catch( Exception e ) { - logger.error( "EC2 ModifyImage - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); - } - } - - /** - * If given a specific list of snapshots of interest, then only values from those snapshots are returned. - * - * @param interestedShots - can be null, should be a subset of all snapshots - */ - private EC2DescribeSnapshotsResponse listSnapshots( String[] interestedShots ) throws Exception { - EC2DescribeSnapshotsResponse snapshots = new EC2DescribeSnapshotsResponse(); - - List cloudSnaps; - if (interestedShots == null || interestedShots.length == 0) { - cloudSnaps = getApi().listSnapshots(null, null, null, null, null, null, null, null, null); - } else { - cloudSnaps = new ArrayList(); - - for(String id : interestedShots) { - List tmpList = getApi().listSnapshots(null, null, id, null, null, null, null, null, null); - cloudSnaps.addAll(tmpList); - } - } - - if (cloudSnaps == null) { - return null; - } - - for(CloudStackSnapshot cloudSnapshot : cloudSnaps) { - EC2Snapshot shot = new EC2Snapshot(); - shot.setId(cloudSnapshot.getId()); - shot.setName(cloudSnapshot.getName()); - shot.setVolumeId(cloudSnapshot.getVolumeId()); - shot.setType(cloudSnapshot.getSnapshotType()); - shot.setState(cloudSnapshot.getState()); - shot.setCreated(cloudSnapshot.getCreated()); - 
shot.setAccountName(cloudSnapshot.getAccountName()); - shot.setDomainId(cloudSnapshot.getDomainId()); - - snapshots.addSnapshot(shot); - } - return snapshots; - } - - - // handlers - /** - * return password data from the instance - * - * @param instanceId - * @return - */ - public EC2PasswordData getPasswordData(String instanceId) { - try { - CloudStackPasswordData resp = getApi().getVMPassword(instanceId); - EC2PasswordData passwdData = new EC2PasswordData(); - if (resp != null) { - passwdData.setInstanceId(instanceId); - passwdData.setEncryptedPassword(resp.getEncryptedpassword()); - } - return passwdData; - } catch(Exception e) { - logger.error("EC2 GetPasswordData - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); - } - } - /** - * Lists SSH KeyPairs on the systme - * - * @param request - * @return - */ - public EC2DescribeKeyPairsResponse describeKeyPairs( EC2DescribeKeyPairs request ) { - try { - EC2KeyPairFilterSet filterSet = request.getKeyFilterSet(); - String[] keyNames = request.getKeyNames(); - List keyPairs = getApi().listSSHKeyPairs(null, null, null); - List keyPairsList = new ArrayList(); - - if (keyPairs != null) { - // Let's trim the list of keypairs to only the ones listed in keyNames - if (keyNames != null && keyNames.length > 0) { - for (CloudStackKeyPair keyPair : keyPairs) { - boolean matched = false; - for (String keyName : keyNames) { - if (keyPair.getName().contains(keyName)) { - matched = true; - break; - } - } - if (matched == false) { - keyPairs.remove(keyPair); - } - } - } - - if (keyPairs.isEmpty() == true) { - throw new EC2ServiceException(ServerError.InternalError, "No keypairs left!"); - } - - // this should be reworked... 
converting from CloudStackKeyPairResponse to EC2SSHKeyPair is dumb - for (CloudStackKeyPair respKeyPair: keyPairs) { - EC2SSHKeyPair ec2KeyPair = new EC2SSHKeyPair(); - ec2KeyPair.setFingerprint(respKeyPair.getFingerprint()); - ec2KeyPair.setKeyName(respKeyPair.getName()); - ec2KeyPair.setPrivateKey(respKeyPair.getPrivatekey()); - keyPairsList.add(ec2KeyPair); - } - } - return filterSet.evaluate(keyPairsList); - } catch(Exception e) { - logger.error("EC2 DescribeKeyPairs - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); - } - } - - /** - * Delete SSHKeyPair - * - * @param request - * @return - */ - public boolean deleteKeyPair( EC2DeleteKeyPair request ) { - try { - CloudStackInfoResponse resp = getApi().deleteSSHKeyPair(request.getKeyName(), null, null); - if (resp == null) { - throw new Exception("Ivalid CloudStack API response"); - } - - return resp.getSuccess(); - } catch(Exception e) { - logger.error("EC2 DeleteKeyPair - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); - } - } - - /** - * Create SSHKeyPair - * - * @param request - * @return - */ - public EC2SSHKeyPair createKeyPair(EC2CreateKeyPair request) { - try { - CloudStackKeyPair resp = getApi().createSSHKeyPair(request.getKeyName(), null, null); - if (resp == null) { - throw new Exception("Ivalid CloudStack API response"); - } - - EC2SSHKeyPair response = new EC2SSHKeyPair(); - response.setFingerprint(resp.getFingerprint()); - response.setKeyName(resp.getName()); - response.setPrivateKey(resp.getPrivatekey()); - - return response; - } catch (Exception e) { - logger.error("EC2 CreateKeyPair - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); - } - } - - /** - * Import an existing SSH KeyPair - * - * @param request - * @return - */ - public EC2SSHKeyPair importKeyPair( EC2ImportKeyPair request ) { - try { - CloudStackKeyPair resp = getApi().registerSSHKeyPair(request.getKeyName(), 
request.getPublicKeyMaterial()); - if (resp == null) { - throw new Exception("Ivalid CloudStack API response"); - } - - EC2SSHKeyPair response = new EC2SSHKeyPair(); - response.setFingerprint(resp.getFingerprint()); - response.setKeyName(resp.getName()); - response.setPrivateKey(resp.getPrivatekey()); - - return response; - } catch (Exception e) { - logger.error("EC2 ImportKeyPair - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); - } - } - - /** - * list ip addresses that have been allocated - * - * @param request - * @return - */ - public EC2DescribeAddressesResponse describeAddresses( EC2DescribeAddresses request ) { - try { - List addrList = getApi().listPublicIpAddresses(null, null, null, null, null, null, null, null, null); - - EC2AddressFilterSet filterSet = request.getFilterSet(); - List addressList = new ArrayList(); - if (addrList != null && addrList.size() > 0) { - for (CloudStackIpAddress addr: addrList) { - // remember, if no filters are set, request.inPublicIpSet always returns true - if (request.inPublicIpSet(addr.getIpAddress())) { - EC2Address ec2Address = new EC2Address(); - ec2Address.setIpAddress(addr.getIpAddress()); - if (addr.getVirtualMachineId() != null) - ec2Address.setAssociatedInstanceId(addr.getVirtualMachineId().toString()); - addressList.add(ec2Address); - } - } - } - - return filterSet.evaluate(addressList); - } catch(Exception e) { - logger.error("EC2 DescribeAddresses - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); - } - } - - /** - * release an IP Address - * - * @param request - * @return - */ - public boolean releaseAddress(EC2ReleaseAddress request) { - try { - CloudStackIpAddress cloudIp = getApi().listPublicIpAddresses(null, null, null, null, null, request.getPublicIp(), null, null, null).get(0); - CloudStackInfoResponse resp = getApi().disassociateIpAddress(cloudIp.getId()); - if (resp != null) { - return resp.getSuccess(); - } - } catch(Exception e) { - 
logger.error("EC2 ReleaseAddress - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); - } - return false; - } - - /** - * Associate an address with an instance - * - * @param request - * @return - */ - public boolean associateAddress( EC2AssociateAddress request ) { - try { - CloudStackIpAddress cloudIp = getApi().listPublicIpAddresses(null, null, null, null, null, request.getPublicIp(), null, null, null).get(0); - CloudStackUserVm cloudVm = getApi().listVirtualMachines(null, null, null, null, null, null, request.getInstanceId(), null, null, null, null, null, null, null, null).get(0); - - CloudStackInfoResponse resp = getApi().enableStaticNat(cloudIp.getId(), cloudVm.getId()); - if (resp != null) { - return resp.getSuccess(); - } - } catch(Exception e) { - logger.error( "EC2 AssociateAddress - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); - } - return false; - } - - /** - * Disassociate an address from an instance - * - * @param request - * @return - */ - public boolean disassociateAddress( EC2DisassociateAddress request ) { - try { - CloudStackIpAddress cloudIp = getApi().listPublicIpAddresses(null, null, null, null, null, request.getPublicIp(), null, null, null).get(0); - CloudStackInfoResponse resp = getApi().disassociateIpAddress(cloudIp.getId()); - if (resp != null) { - return resp.getSuccess(); - } - } catch(Exception e) { - logger.error( "EC2 DisassociateAddress - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? 
e.getMessage() : "An unexpected error occurred."); - } - return false; - } - - /** - * Allocate an address - * - * @param request - * @return - */ - public EC2Address allocateAddress() - { - try { - EC2Address ec2Address = new EC2Address(); - // this gets our networkId - CloudStackAccount caller = getCurrentAccount(); - - CloudStackZone zone = findZone(); - CloudStackNetwork net = findNetwork(zone); -// CloudStackIpAddress resp = getApi().associateIpAddress(null, null, null, "0036952d-48df-4422-9fd0-94b0885e18cb"); - CloudStackIpAddress resp = getApi().associateIpAddress(null, null, null, net.getId()); - ec2Address.setAssociatedInstanceId(resp.getId()); - if (resp.getIpAddress() == null) { - List addrList = getApi().listPublicIpAddresses(null, null, null, null, null, null, null, null, null); - if (addrList != null && addrList.size() > 0) { - for (CloudStackIpAddress addr: addrList) { - if (addr.getId().equalsIgnoreCase(resp.getId())) { - ec2Address.setIpAddress(addr.getIpAddress()); - } - } - } - } else { - ec2Address.setIpAddress(resp.getIpAddress()); - } - - return ec2Address; - } catch(Exception e) { - logger.error( "EC2 AllocateAddress - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); - } - } - - /** - * List of templates available. We only support the imageSet version of this call or when no search parameters are passed - * which results in asking for all templates. 
- * - * @param request - * @return - */ - public EC2DescribeImagesResponse describeImages(EC2DescribeImages request) - { - EC2DescribeImagesResponse images = new EC2DescribeImagesResponse(); - - try { - String[] templateIds = request.getImageSet(); - - if ( 0 == templateIds.length ) { - return listTemplates(null, images); - } - for (String s : templateIds) { - images = listTemplates(s, images); - } - return images; - - } catch( Exception e ) { - logger.error( "EC2 DescribeImages - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); - } - } - - /** - * Create a template - * Amazon API just gives us the instanceId to create the template from. - * But our createTemplate function requires the volumeId and osTypeId. - * So to get that we must make the following sequence of cloud API calls: - * 1) listVolumes&virtualMachineId= -- gets the volumeId - * 2) listVirtualMachinees&id= -- gets the templateId - * 3) listTemplates&id= -- gets the osTypeId - * - * If we have to start and stop the VM in question then this function is - * going to take a long time to complete. 
- * - * @param request - * @return - */ - public EC2CreateImageResponse createImage(EC2CreateImage request) - { - EC2CreateImageResponse response = null; - boolean needsRestart = false; - String volumeId = null; - - try { - // [A] Creating a template from a VM volume should be from the ROOT volume - // Also for this to work the VM must be in a Stopped state so we 'reboot' it if its not - EC2DescribeVolumesResponse volumes = new EC2DescribeVolumesResponse(); - volumes = listVolumes( null, request.getInstanceId(), volumes ); - EC2Volume[] volSet = volumes.getVolumeSet(); - for (EC2Volume vol : volSet) { - if (vol.getType().equalsIgnoreCase( "ROOT" )) { - String vmState = vol.getVMState(); - if (vmState.equalsIgnoreCase( "running" ) || vmState.equalsIgnoreCase( "starting" )) { - needsRestart = true; - if (!stopVirtualMachine( request.getInstanceId() )) - throw new EC2ServiceException(ClientError.IncorrectState, "CreateImage - instance must be in a stopped state"); - } - volumeId = vol.getId(); - break; - } - } - - // [B] The parameters must be in sorted order for proper signature generation - EC2DescribeInstancesResponse instances = new EC2DescribeInstancesResponse(); - instances = lookupInstances( request.getInstanceId(), instances ); - EC2Instance[] instanceSet = instances.getInstanceSet(); - String templateId = instanceSet[0].getTemplateId(); - - EC2DescribeImagesResponse images = new EC2DescribeImagesResponse(); - images = listTemplates( templateId, images ); - EC2Image[] imageSet = images.getImageSet(); - String osTypeId = imageSet[0].getOsTypeId(); - - CloudStackTemplate resp = getApi().createTemplate((request.getDescription() == null ? 
"" : request.getDescription()), request.getName(), - osTypeId, null, null, null, null, null, null, volumeId); - if (resp == null || resp.getId() == null) { - throw new EC2ServiceException(ServerError.InternalError, "An upexpected error occurred."); - } -<<<<<<< HEAD - - //if template was created succesfully, create the new image response - response = new EC2CreateImageResponse(); - response.setId(resp.getId()); -======= ->>>>>>> 6472e7b... Now really adding the renamed files! - - // [C] If we stopped the virtual machine now we need to restart it - if (needsRestart) { - if (!startVirtualMachine( request.getInstanceId() )) - throw new EC2ServiceException(ServerError.InternalError, - "CreateImage - restarting instance " + request.getInstanceId() + " failed"); - } - return response; - - } catch( Exception e ) { - logger.error( "EC2 CreateImage - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); - } - } - - /** - * Register a template - * - * @param request - * @return - */ - public EC2CreateImageResponse registerImage(EC2RegisterImage request) - { - try { - CloudStackAccount caller = getCurrentAccount(); - if (null == request.getFormat() || null == request.getName() || null == request.getOsTypeName() || - null == request.getLocation() || null == request.getZoneName()) - throw new EC2ServiceException(ServerError.InternalError, "Missing parameter - location/architecture/name"); - - List templates = getApi().registerTemplate((request.getDescription() == null ? 
request.getName() : request.getDescription()), -<<<<<<< HEAD - request.getFormat(), request.getHypervisor(), request.getName(), toOSTypeId(request.getOsTypeName()), request.getLocation(), - toZoneId(request.getZoneName(), null), null, null, null, null, null, null, null, null, null); -======= - request.getFormat(), null, request.getName(), toOSTypeId(request.getOsTypeName()), request.getLocation(), - toZoneId(request.getZoneName(), caller.getDomainId()), null, null, null, null, null, null, null, null, null); ->>>>>>> 6472e7b... Now really adding the renamed files! - if (templates != null) { - // technically we will only ever register a single template... - for (CloudStackTemplate template : templates) { - if (template != null && template.getId() != null) { - EC2CreateImageResponse image = new EC2CreateImageResponse(); - image.setId(template.getId().toString()); - return image; - } - } - } - return null; - } catch( Exception e ) { - logger.error( "EC2 RegisterImage - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); - } - } - - /** - * Deregister a template(image) - * Our implementation is different from Amazon in that we do delete the template - * when we deregister it. The cloud API has not deregister call. - * - * @param image - * @return - */ - public boolean deregisterImage( EC2Image image ) - { - try { - CloudStackInfoResponse resp = getApi().deleteTemplate(image.getId(), null); - return resp.getSuccess(); - } catch( Exception e ) { - logger.error( "EC2 DeregisterImage - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? 
e.getMessage() : "An unexpected error occurred."); - } - } - - /** - * list instances - * - * @param request - * @return - */ - public EC2DescribeInstancesResponse describeInstances(EC2DescribeInstances request ) { - try { - return listVirtualMachines( request.getInstancesSet(), request.getFilterSet()); - } catch( Exception e ) { - logger.error( "EC2 DescribeInstances - " ,e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); - } - } - - /** - * list Zones - * - * @param request - * @return - */ - public EC2DescribeAvailabilityZonesResponse handleRequest(EC2DescribeAvailabilityZones request) { - try { - CloudStackAccount caller = getCurrentAccount(); - -<<<<<<< HEAD - return listZones(request.getZoneSet(), null); -======= - return listZones(request.getZoneSet(), caller.getDomainId()); ->>>>>>> 6472e7b... Now really adding the renamed files! - - } catch( EC2ServiceException error ) { - logger.error( "EC2 DescribeAvailabilityZones - ", error); - throw error; - - } catch( Exception e ) { - logger.error( "EC2 DescribeAvailabilityZones - " ,e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? 
e.getMessage() : "An unexpected error occurred."); - } - } - - /** - * list volumes - * - * @param request - * @return - */ - public EC2DescribeVolumesResponse handleRequest( EC2DescribeVolumes request ) { - EC2DescribeVolumesResponse volumes = new EC2DescribeVolumesResponse(); - EC2VolumeFilterSet vfs = request.getFilterSet(); - - try { - String[] volumeIds = request.getVolumeSet(); - if ( 0 == volumeIds.length ){ - volumes = listVolumes( null, null, volumes ); - } else { - for (String s : volumeIds) - volumes = listVolumes(s, null, volumes ); - } - - if ( null == vfs ) - return volumes; - else return vfs.evaluate( volumes ); - } catch( Exception e ) { - logger.error( "EC2 DescribeVolumes - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); - } - } - - /** - * Attach a volume to an instance - * - * @param request - * @return - */ - public EC2Volume attachVolume( EC2Volume request ) { - try { -<<<<<<< HEAD - request.setDeviceId(mapDeviceToCloudDeviceId(request.getDevice())); -======= - request.setDeviceId( mapDeviceToCloudDeviceId(request.getDevice())); ->>>>>>> 6472e7b... Now really adding the renamed files! 
- EC2Volume resp = new EC2Volume(); - - CloudStackVolume vol = getApi().attachVolume(request.getId(), request.getInstanceId(), request.getDeviceId()); - if(vol != null) { - resp.setAttached(vol.getAttached()); - resp.setCreated(vol.getCreated()); - resp.setDevice(request.getDevice()); - resp.setDeviceId(vol.getDeviceId()); - resp.setHypervisor(vol.getHypervisor()); - resp.setId(vol.getId()); - resp.setInstanceId(vol.getVirtualMachineId()); - resp.setSize(vol.getSize()); - resp.setSnapshotId(vol.getSnapshotId()); - resp.setState(vol.getState()); - resp.setType(vol.getVolumeType()); - resp.setVMState(vol.getVirtualMachineState()); - resp.setZoneName(vol.getZoneName()); - return resp; - } - throw new EC2ServiceException( ServerError.InternalError, "An unexpected error occurred." ); - } catch( Exception e ) { - logger.error( "EC2 AttachVolume 2 - ", e); - throw new EC2ServiceException( ServerError.InternalError, e.getMessage() != null ? e.getMessage() : e.toString()); - } - } - - /** - * Detach a volume from an instance - * - * @param request - * @return - */ - public EC2Volume detachVolume(EC2Volume request) { - try { - CloudStackVolume vol = getApi().detachVolume(null, request.getId(), null); - EC2Volume resp = new EC2Volume(); - - if(vol != null) { - resp.setAttached(vol.getAttached()); - resp.setCreated(vol.getCreated()); - resp.setDevice(request.getDevice()); - resp.setDeviceId(vol.getDeviceId()); - resp.setHypervisor(vol.getHypervisor()); - resp.setId(vol.getId()); - resp.setInstanceId(vol.getVirtualMachineId()); - resp.setSize(vol.getSize()); - resp.setSnapshotId(vol.getSnapshotId()); - resp.setState(vol.getState()); - resp.setType(vol.getVolumeType()); - resp.setVMState(vol.getVirtualMachineState()); - resp.setZoneName(vol.getZoneName()); - return resp; - } - - throw new EC2ServiceException( ServerError.InternalError, "An unexpected error occurred." 
); - } catch( Exception e ) { - logger.error( "EC2 DetachVolume - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); - } - } - - /** - * Create a volume - * - * @param request - * @return - */ - public EC2Volume createVolume( EC2CreateVolume request ) { - try { - - CloudStackAccount caller = getCurrentAccount(); - // -> put either snapshotid or diskofferingid on the request - String snapshotId = request.getSnapshotId(); - Long size = request.getSize(); - String diskOfferingId = null; - - if (snapshotId == null) { - List disks = getApi().listDiskOfferings(null, null, null, null); - for (CloudStackDiskOffering offer : disks) { - if (offer.isCustomized()) { - diskOfferingId = offer.getId(); - } - } - if (diskOfferingId == null) throw new EC2ServiceException(ServerError.InternalError, "No Customize Disk Offering Found"); - } - -// // -> no volume name is given in the Amazon request but is required in the cloud API -<<<<<<< HEAD - CloudStackVolume vol = getApi().createVolume(UUID.randomUUID().toString(), null, diskOfferingId, null, size, snapshotId, toZoneId(request.getZoneName(), null)); -======= - CloudStackVolume vol = getApi().createVolume(UUID.randomUUID().toString(), null, diskOfferingId, null, size, snapshotId, toZoneId(request.getZoneName(), caller.getDomainId())); ->>>>>>> 6472e7b... Now really adding the renamed files! 
- if (vol != null) { - EC2Volume resp = new EC2Volume(); - resp.setAttached(vol.getAttached()); - resp.setCreated(vol.getCreated()); -// resp.setDevice(); - resp.setDeviceId(vol.getDeviceId()); - resp.setHypervisor(vol.getHypervisor()); - resp.setId(vol.getId()); - resp.setInstanceId(vol.getVirtualMachineId()); - resp.setSize(vol.getSize()); - resp.setSnapshotId(vol.getSnapshotId()); - resp.setState(vol.getState()); - resp.setType(vol.getVolumeType()); - resp.setVMState(vol.getVirtualMachineState()); - resp.setZoneName(vol.getZoneName()); - return resp; - } - return null; - } catch( Exception e ) { - logger.error( "EC2 CreateVolume - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); - } - } - - /** - * Delete a volume - * - * @param request - * @return - */ - public EC2Volume deleteVolume( EC2Volume request ) { - try { - CloudStackInfoResponse resp = getApi().deleteVolume(request.getId()); - if(resp != null) { - request.setState("deleted"); - return request; - } - - throw new EC2ServiceException(ServerError.InternalError, "An unexpected error occurred."); - } catch( Exception e ) { - logger.error( "EC2 DeleteVolume 2 - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? 
e.getMessage() : "An unexpected error occurred."); - } - } - - /** - * Reboot an instance or instances - * - * @param request - * @return - */ - public boolean rebootInstances(EC2RebootInstances request) - { - EC2Instance[] vms = null; - - // -> reboot is not allowed on destroyed (i.e., terminated) instances - try { - String[] instanceSet = request.getInstancesSet(); - EC2DescribeInstancesResponse previousState = listVirtualMachines( instanceSet, null ); - vms = previousState.getInstanceSet(); - - // -> send reboot requests for each found VM - for (EC2Instance vm : vms) { - if (vm.getState().equalsIgnoreCase( "Destroyed" )) continue; - - CloudStackUserVm resp = getApi().rebootVirtualMachine(vm.getId()); - if (logger.isDebugEnabled()) - logger.debug("Rebooting VM " + resp.getId() + " job " + resp.getJobId()); - } - - // -> if some specified VMs where not found we have to tell the caller - if (instanceSet.length != vms.length) - throw new EC2ServiceException(ClientError.InvalidAMIID_NotFound, "One or more instanceIds do not exist, other instances rebooted."); - - return true; - } catch( Exception e ) { - logger.error( "EC2 RebootInstances - ", e ); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); - } - } - - /** - * Using a template (AMI), launch n instances - * - * @param request - * @return - */ - public EC2RunInstancesResponse runInstances(EC2RunInstances request) { - EC2RunInstancesResponse instances = new EC2RunInstancesResponse(); - int createInstances = 0; - int canCreateInstances = -1; - int countCreated = 0; - - try { - CloudStackAccount caller = getCurrentAccount(); - - // ugly... 
- canCreateInstances = calculateAllowedInstances(); - if (-1 == canCreateInstances) canCreateInstances = request.getMaxCount(); - - if (canCreateInstances < request.getMinCount()) { - logger.info( "EC2 RunInstances - min count too big (" + request.getMinCount() + "), " + canCreateInstances + " left to allocate"); - throw new EC2ServiceException(ClientError.InstanceLimitExceeded ,"Only " + canCreateInstances + " instance(s) left to allocate"); - } - - if ( canCreateInstances < request.getMaxCount()) - createInstances = canCreateInstances; - else - createInstances = request.getMaxCount(); - -<<<<<<< HEAD - //find CS service Offering ID - String instanceType = "m1.small"; - if(request.getInstanceType() != null){ - instanceType = request.getInstanceType(); - } - CloudStackServiceOffering svcOffering = getCSServiceOfferingId(instanceType); - if(svcOffering == null){ - logger.info("No ServiceOffering found to be defined by name, please contact the administrator "+instanceType ); - throw new EC2ServiceException(ClientError.Unsupported, "instanceType: [" + instanceType + "] not found!"); - } - - // zone stuff - String zoneId = toZoneId(request.getZoneName(), null); -======= - // the mapping stuff - OfferingBundle offer = instanceTypeToOfferBundle( request.getInstanceType()); - - // zone stuff - String zoneId = toZoneId(request.getZoneName(), caller.getDomainId()); ->>>>>>> 6472e7b... Now really adding the renamed files! - - List zones = getApi().listZones(null, null, zoneId, null); - if (zones == null || zones.size() == 0) { - logger.info("EC2 RunInstances - zone [" + request.getZoneName() + "] not found!"); - throw new EC2ServiceException(ClientError.InvalidZone_NotFound, "ZoneId [" + request.getZoneName() + "] not found!"); - } - // we choose first zone? 
- CloudStackZone zone = zones.get(0); - - // network - CloudStackNetwork network = findNetwork(zone); - - // now actually deploy the vms - for( int i=0; i < createInstances; i++ ) { -<<<<<<< HEAD - CloudStackUserVm resp = getApi().deployVirtualMachine(svcOffering.getId(), -======= - CloudStackUserVm resp = getApi().deployVirtualMachine(offer.getServiceOfferingId(), ->>>>>>> 6472e7b... Now really adding the renamed files! - request.getTemplateId(), zoneId, null, null, null, null, - null, null, null, request.getKeyName(), null, (network != null ? network.getId() : null), - null, null, request.getSize().longValue(), request.getUserData()); - EC2Instance vm = new EC2Instance(); - vm.setId(resp.getId().toString()); - vm.setName(resp.getName()); - vm.setZoneName(resp.getZoneName()); - vm.setTemplateId(resp.getTemplateId().toString()); - if (resp.getSecurityGroupList() != null && resp.getSecurityGroupList().size() > 0) { - // TODO, we have a list of security groups, just return the first one? - CloudStackSecurityGroup securityGroup = resp.getSecurityGroupList().get(0); - vm.setGroup(securityGroup.getName()); - } - vm.setState(resp.getState()); - vm.setCreated(resp.getCreated()); - vm.setIpAddress(resp.getIpAddress()); - vm.setAccountName(resp.getAccountName()); - vm.setDomainId(resp.getDomainId()); - vm.setHypervisor(resp.getHypervisor()); -<<<<<<< HEAD - vm.setServiceOffering( svcOffering.getName()); -======= - vm.setServiceOffering( serviceOfferingIdToInstanceType( offer.getServiceOfferingId())); ->>>>>>> 6472e7b... Now really adding the renamed files! 
- instances.addInstance(vm); - countCreated++; - } - - if (0 == countCreated) { - // TODO, we actually need to destroy left-over VMs when the exception is thrown - throw new EC2ServiceException(ServerError.InsufficientInstanceCapacity, "Insufficient Instance Capacity" ); - } - - return instances; - } catch( Exception e ) { - logger.error( "EC2 RunInstances - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); - } - } - - /** - * Start an instance or instances - * - * @param request - * @return - */ - public EC2StartInstancesResponse startInstances(EC2StartInstances request) { - EC2StartInstancesResponse instances = new EC2StartInstancesResponse(); - EC2Instance[] vms = null; - - // -> first determine the current state of each VM (becomes it previous state) - try { - EC2DescribeInstancesResponse previousState = listVirtualMachines( request.getInstancesSet(), null ); - vms = previousState.getInstanceSet(); - - // -> send start requests for each item - for (EC2Instance vm : vms) { - vm.setPreviousState(vm.getState()); - - // -> if its already running then we don't care - if (vm.getState().equalsIgnoreCase( "Running" ) || vm.getState().equalsIgnoreCase( "Destroyed" )) continue; - - CloudStackUserVm resp = getApi().startVirtualMachine(vm.getId()); -<<<<<<< HEAD - if(resp != null){ - vm.setState(resp.getState()); - if(logger.isDebugEnabled()) - logger.debug("Starting VM " + vm.getId() + " job " + resp.getJobId()); - } -======= - - if(logger.isDebugEnabled()) - logger.debug("Starting VM " + vm.getId() + " job " + resp.getJobId()); ->>>>>>> 6472e7b... Now really adding the renamed files! - instances.addInstance(vm); - } - return instances; - } catch( Exception e ) { - logger.error( "EC2 StartInstances - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? 
e.getMessage() : "An unexpected error occurred."); - } - } - - /** - * Stop an instance or instances - * - * @param request - * @return - */ - public EC2StopInstancesResponse stopInstances(EC2StopInstances request) { - EC2StopInstancesResponse instances = new EC2StopInstancesResponse(); - EC2Instance[] virtualMachines = null; - - // -> first determine the current state of each VM (becomes it previous state) - try { - String[] instanceSet = request.getInstancesSet(); - - EC2DescribeInstancesResponse previousState = listVirtualMachines( instanceSet, null ); - virtualMachines = previousState.getInstanceSet(); - - // -> send stop requests for each item - for (EC2Instance vm : virtualMachines) { - vm.setPreviousState( vm.getState()); - CloudStackUserVm resp = null; - if (request.getDestroyInstances()) { - if (vm.getState().equalsIgnoreCase( "Destroyed" )) continue; - resp = getApi().destroyVirtualMachine(vm.getId()); - if(logger.isDebugEnabled()) - logger.debug("Destroying VM " + vm.getId() + " job " + resp.getJobId()); - } else { - if (vm.getState().equalsIgnoreCase("Stopped") || vm.getState().equalsIgnoreCase("Destroyed")) continue; - resp = getApi().stopVirtualMachine(vm.getId(), false); - if(logger.isDebugEnabled()) - logger.debug("Stopping VM " + vm.getId() + " job " + resp.getJobId()); - } -<<<<<<< HEAD - if (resp != null) { - vm.setState(resp.getState()); - instances.addInstance(vm); - } -======= - if (resp != null) instances.addInstance(vm); ->>>>>>> 6472e7b... Now really adding the renamed files! - } - return instances; - } catch( Exception e ) { - logger.error( "EC2 StopInstances - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); - } - } - - /** - * RunInstances includes a min and max count of requested instances to create. - * We have to be able to create the min number for the user or none at all. So - * here we determine what the user has left to create. 
- * - * @return -1 means no limit exists, other positive numbers give max number left that - * the user can create. - */ - private int calculateAllowedInstances() throws Exception { - int maxAllowed = -1; - - CloudStackAccount ourAccount = getCurrentAccount(); - - if (ourAccount == null) { - // This should never happen, but - // we will return -99999 if this happens... - return -99999; - } - - // if accountType is Admin == 1, then let's return -1 - if (ourAccount.getAccountType() == 1) return -1; - - // -> get the user limits on instances - // "0" represents instances: - // http://download.cloud.com/releases/2.2.0/api_2.2.8/user/listResourceLimits.html - List limits = getApi().listResourceLimits(null, null, null, null, "0"); - if (limits != null && limits.size() > 0) { - maxAllowed = (int)limits.get(0).getMax().longValue(); - if (maxAllowed == -1) - return -1; // no limit - - EC2DescribeInstancesResponse existingVMS = listVirtualMachines( null, null ); - EC2Instance[] vmsList = existingVMS.getInstanceSet(); - return (maxAllowed - vmsList.length); - } else { - return 0; - } - } - - /** - * Performs the cloud API listVirtualMachines one or more times. - * - * @param virtualMachineIds - an array of instances we are interested in getting information on - * @param ifs - filter out unwanted instances - */ - private EC2DescribeInstancesResponse listVirtualMachines( String[] virtualMachineIds, EC2InstanceFilterSet ifs ) throws Exception - { - EC2DescribeInstancesResponse instances = new EC2DescribeInstancesResponse(); - - if (null == virtualMachineIds || 0 == virtualMachineIds.length) { - instances = lookupInstances( null, instances ); - } else { - for( int i=0; i < virtualMachineIds.length; i++ ) { - instances = lookupInstances( virtualMachineIds[i], instances ); - } - } - - if ( null == ifs ) - return instances; - else return ifs.evaluate( instances ); - } - - /** - * Get one or more templates depending on the volumeId parameter. 
- * - * @param volumeId - if interested in one specific volume, null if want to list all volumes - * @param instanceId - if interested in volumes for a specific instance, null if instance is not important - */ - private EC2DescribeVolumesResponse listVolumes(String volumeId, String instanceId, EC2DescribeVolumesResponse volumes)throws Exception { - - List vols = getApi().listVolumes(null, null, null, volumeId, null, null, null, null, null, instanceId, null); - if(vols != null && vols.size() > 0) { - for(CloudStackVolume vol : vols) { - EC2Volume ec2Vol = new EC2Volume(); - ec2Vol.setId(vol.getId()); - if(vol.getAttached() != null) - ec2Vol.setAttached(vol.getAttached()); - ec2Vol.setCreated(vol.getCreated()); - - if(vol.getDeviceId() != null) - ec2Vol.setDeviceId(vol.getDeviceId()); - ec2Vol.setHypervisor(vol.getHypervisor()); - - if(vol.getSnapshotId() != null) - ec2Vol.setSnapshotId(vol.getSnapshotId()); - ec2Vol.setState(mapToAmazonVolState(vol.getState())); - ec2Vol.setSize(vol.getSize()); - ec2Vol.setType(vol.getVolumeType()); - - if(vol.getVirtualMachineId() != null) - ec2Vol.setInstanceId(vol.getVirtualMachineId()); - - if(vol.getVirtualMachineState() != null) - ec2Vol.setVMState(vol.getVirtualMachineState()); - ec2Vol.setZoneName(vol.getZoneName()); - - volumes.addVolume(ec2Vol); - } - } - - return volumes; - } - - /** - * Translate the given zone name into the required zoneId. Query for - * a list of all zones and match the zone name given. Amazon uses zone - * names while the Cloud API often requires the zoneId. 
- * - * @param zoneName - (e.g., 'AH'), if null return the first zone in the available list - * - * @return the zoneId that matches the given zone name - */ - private String toZoneId(String zoneName, String domainId) throws Exception { - EC2DescribeAvailabilityZonesResponse zones = null; - String[] interestedZones = null; - - if ( null != zoneName) { - interestedZones = new String[1]; - interestedZones[0] = zoneName; - } - zones = listZones(interestedZones, domainId); - - if (zones == null || zones.getZoneIdAt( 0 ) == null) - throw new EC2ServiceException(ClientError.InvalidParameterValue, "Unknown zoneName value - " + zoneName); - return zones.getZoneIdAt(0); - } - -<<<<<<< HEAD - - /** - * Convert from the Amazon instanceType strings to Cloud serviceOfferingId - * - */ - - private CloudStackServiceOffering getCSServiceOfferingId(String instanceType){ - try { - if (null == instanceType) instanceType = "m1.small"; - - CloudStackSvcOfferingDao dao = new CloudStackSvcOfferingDao(); - return dao.getSvcOfferingByName(instanceType); - - } catch(Exception e) { - logger.error( "Error while retrieving ServiceOffering information by name - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); - } - } - -======= - /** - * Convert from the Amazon instanceType strings to the Cloud APIs diskOfferingId and - * serviceOfferingId based on the loaded map. 
- * - * @param instanceType - if null we return the M1Small instance type - * - * @return an OfferingBundle - * @throws SQLException, ClassNotFoundException, IllegalAccessException, InstantiationException - */ - private OfferingBundle instanceTypeToOfferBundle( String instanceType ) - throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException { - OfferingBundle found = null; - OfferingDao ofDao = new OfferingDao(); - - if (null == instanceType) instanceType = "m1.small"; - String cloudOffering = ofDao.getCloudOffering( instanceType ); - - if ( null != cloudOffering ) - { - found = new OfferingBundle(); - found.setServiceOfferingId( cloudOffering ); - } - else throw new EC2ServiceException( ClientError.Unsupported, "Unknown: " + instanceType ); - - return found; - } - ->>>>>>> 6472e7b... Now really adding the renamed files! - /** - * Convert from the Cloud serviceOfferingId to the Amazon instanceType strings based - * on the loaded map. - * - * @param serviceOfferingId - * @return A valid value for the Amazon defined instanceType - * @throws SQLException, ClassNotFoundException, IllegalAccessException, InstantiationException - */ -<<<<<<< HEAD - private String serviceOfferingIdToInstanceType( String serviceOfferingId ){ - try{ - CloudStackSvcOfferingDao dao = new CloudStackSvcOfferingDao(); - CloudStackServiceOffering offering = dao.getSvcOfferingById(serviceOfferingId); - if(offering == null){ - logger.warn( "No instanceType match for serviceOfferingId: [" + serviceOfferingId + "]" ); - return "m1.small"; - } - return offering.getName(); - } - catch(Exception e) { - logger.error( "sError while retrieving ServiceOffering information by id - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); - } -======= - private String serviceOfferingIdToInstanceType( String serviceOfferingId ) - throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException { - OfferingDao ofDao = new 
OfferingDao(); - String amazonOffering = ofDao.getAmazonOffering( serviceOfferingId.trim()); - - if ( null == amazonOffering ) { - logger.warn( "No instanceType match for serverOfferingId: [" + serviceOfferingId + "]" ); - return "m1.small"; - } - else return amazonOffering; ->>>>>>> 6472e7b... Now really adding the renamed files! - } - - /** - * Match the value in the 'description' field of the listOsTypes response to get - * the osTypeId. - * - * @param osTypeName - * @return the Cloud.com API osTypeId - */ - private String toOSTypeId( String osTypeName ) throws Exception { - try { - List osTypes = getApi().listOsTypes(null, null, null); - for (CloudStackOsType osType : osTypes) { - if (osType.getDescription().toLowerCase().indexOf(osTypeName.toLowerCase()) != -1) - return osType.getId(); - } - return null; - } catch(Exception e) { - logger.error( "List OS Types - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); - } - - } - - /** - * More than one place we need to access the defined list of zones. If given a specific - * list of zones of interest, then only values from those zones are returned. 
- * - * @param interestedZones - can be null, should be a subset of all zones - * - * @return EC2DescribeAvailabilityZonesResponse - */ - private EC2DescribeAvailabilityZonesResponse listZones(String[] interestedZones, String domainId) throws Exception - { - EC2DescribeAvailabilityZonesResponse zones = new EC2DescribeAvailabilityZonesResponse(); - - List cloudZones = getApi().listZones(true, domainId, null, null); - - if(cloudZones != null) { - for(CloudStackZone cloudZone : cloudZones) { - if ( null != interestedZones && 0 < interestedZones.length ) { - for( int j=0; j < interestedZones.length; j++ ) { - if (interestedZones[j].equalsIgnoreCase( cloudZone.getName())) { - zones.addZone(cloudZone.getId().toString(), cloudZone.getName()); - break; - } - } - } else { - zones.addZone(cloudZone.getId().toString(), cloudZone.getName()); - } - } - } - return zones; - } - - - /** - * Get information on one or more virtual machines depending on the instanceId parameter. - * - * @param instanceId - if null then return information on all existing instances, otherwise - * just return information on the matching instance. - * @param instances - a container object to fill with one or more EC2Instance objects - * - * @return the same object passed in as the "instances" parameter modified with one or more - * EC2Instance objects loaded. - */ - private EC2DescribeInstancesResponse lookupInstances( String instanceId, EC2DescribeInstancesResponse instances ) - throws Exception { - - String instId = instanceId != null ? 
instanceId : null; - List vms = getApi().listVirtualMachines(null, null, null, null, null, null, - instId, null, null, null, null, null, null, null, null); - - if(vms != null && vms.size() > 0) { - for(CloudStackUserVm cloudVm : vms) { - EC2Instance ec2Vm = new EC2Instance(); - - ec2Vm.setId(cloudVm.getId().toString()); - ec2Vm.setName(cloudVm.getName()); - ec2Vm.setZoneName(cloudVm.getZoneName()); - ec2Vm.setTemplateId(cloudVm.getTemplateId().toString()); - ec2Vm.setGroup(cloudVm.getGroup()); - ec2Vm.setState(cloudVm.getState()); - ec2Vm.setCreated(cloudVm.getCreated()); - ec2Vm.setIpAddress(cloudVm.getIpAddress()); - ec2Vm.setAccountName(cloudVm.getAccountName()); - ec2Vm.setDomainId(cloudVm.getDomainId()); - ec2Vm.setHypervisor(cloudVm.getHypervisor()); - ec2Vm.setRootDeviceType(cloudVm.getRootDeviceType()); - ec2Vm.setRootDeviceId(cloudVm.getRootDeviceId()); - ec2Vm.setServiceOffering(serviceOfferingIdToInstanceType(cloudVm.getServiceOfferingId().toString())); - - List nics = cloudVm.getNics(); - for(CloudStackNic nic : nics) { - if(nic.getIsDefault()) { - ec2Vm.setPrivateIpAddress(nic.getIpaddress()); - break; - } - } - instances.addInstance(ec2Vm); - } - } - return instances; - } - - - /** - * Get one or more templates depending on the templateId parameter. - * - * @param templateId - if null then return information on all existing templates, otherwise - * just return information on the matching template. - * @param images - a container object to fill with one or more EC2Image objects - * - * @return the same object passed in as the "images" parameter modified with one or more - * EC2Image objects loaded. 
- */ - private EC2DescribeImagesResponse listTemplates( String templateId, EC2DescribeImagesResponse images ) throws EC2ServiceException { - try { -<<<<<<< HEAD - List result = new ArrayList(); - - if(templateId != null){ - List template = getApi().listTemplates("executable", null, null, null, templateId , null, null, null); - if(template != null){ - result.addAll(template); - } - }else{ - List selfExecutable = getApi().listTemplates("selfexecutable", null, null, null, null, null, null, null); - if(selfExecutable != null){ - result.addAll(selfExecutable); - } - - List featured = getApi().listTemplates("featured", null, null, null, null, null, null, null); - if(featured != null){ - result.addAll(featured); - } - - List sharedExecutable = getApi().listTemplates("sharedexecutable", null, null, null, null, null, null, null); - if(sharedExecutable != null){ - result.addAll(sharedExecutable); - } - - List community = getApi().listTemplates("community", null, null, null, null, null, null, null); - if(community != null){ - result.addAll(community); - } - } - - if (result != null && result.size() > 0) { - for (CloudStackTemplate temp : result) { -======= - List resp = getApi().listTemplates("executable", null, null, null, templateId != null ? templateId : null, null, null, null); - if (resp != null && resp.size() > 0) { - for (CloudStackTemplate temp : resp) { ->>>>>>> 6472e7b... Now really adding the renamed files! 
- EC2Image ec2Image = new EC2Image(); - ec2Image.setId(temp.getId().toString()); - ec2Image.setAccountName(temp.getAccount()); - ec2Image.setName(temp.getName()); - ec2Image.setDescription(temp.getDisplayText()); - ec2Image.setOsTypeId(temp.getOsTypeId().toString()); - ec2Image.setIsPublic(temp.getIsPublic()); - ec2Image.setIsReady(temp.getIsReady()); - ec2Image.setDomainId(temp.getDomainId()); - images.addImage(ec2Image); - } - } - return images; - } catch(Exception e) { - logger.error( "List Templates - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); - } - } - - /** - * List security groups - * - * @param interestedGroups - * @return - * @throws EC2ServiceException - * @throws UnsupportedEncodingException - * @throws SignatureException - * @throws IOException - * @throws SAXException - * @throws ParserConfigurationException - * @throws ParseException - */ - public EC2DescribeSecurityGroupsResponse listSecurityGroups( String[] interestedGroups ) throws Exception { - try { - EC2DescribeSecurityGroupsResponse groupSet = new EC2DescribeSecurityGroupsResponse(); - - List groups = getApi().listSecurityGroups(null, null, null, null, null, null); - if (groups != null && groups.size() > 0) - for (CloudStackSecurityGroup group : groups) { - boolean matched = false; - if (interestedGroups.length > 0) { - for (String groupName :interestedGroups) { - if (groupName.equalsIgnoreCase(group.getName())) { - matched = true; - break; - } - } - } else { - matched = true; - } - if (!matched) continue; - EC2SecurityGroup ec2Group = new EC2SecurityGroup(); - // not sure if we should set both account and account name to accountname - ec2Group.setAccount(group.getAccountName()); - ec2Group.setAccountName(group.getAccountName()); - ec2Group.setName(group.getName()); - ec2Group.setDescription(group.getDescription()); - ec2Group.setDomainId(group.getDomainId()); - ec2Group.setId(group.getId().toString()); - toPermission(ec2Group, group); - - 
groupSet.addGroup(ec2Group); - } - return groupSet; - } catch(Exception e) { - logger.error( "List Security Groups - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); - } - } - - /** - * Convert ingress rule to EC2IpPermission records - * - * @param response - * @param group - * @return - */ - private boolean toPermission(EC2SecurityGroup response, CloudStackSecurityGroup group ) { - List rules = group.getIngressRules(); - - if (rules == null || rules.isEmpty()) return false; - - for (CloudStackIngressRule rule : rules) { - EC2IpPermission perm = new EC2IpPermission(); - perm.setProtocol(rule.getProtocol()); - perm.setFromPort(rule.getStartPort()); - perm.setToPort(rule.getEndPort()); - perm.setRuleId(rule.getRuleId() != null ? rule.getRuleId().toString() : new String()); - perm.setIcmpCode(rule.getIcmpCode() != null ? rule.getIcmpCode().toString() : new String()); - perm.setIcmpType(rule.getIcmpType() != null ? rule.getIcmpType().toString() : new String()); - perm.setCIDR(rule.getCidr()); - perm.addIpRange(rule.getCidr()); - - if (rule.getAccountName() != null && rule.getSecurityGroupName() != null) { - EC2SecurityGroup newGroup = new EC2SecurityGroup(); - newGroup.setAccount(rule.getAccountName()); - newGroup.setName(rule.getSecurityGroupName()); - perm.addUser(newGroup); - } - response.addIpPermission(perm); - } - return true; - } - - /** - * Find the current account based on the SecretKey - * - * @return - * @throws Exception - */ -<<<<<<< HEAD - public CloudStackAccount getCurrentAccount() throws Exception { -======= - private CloudStackAccount getCurrentAccount() throws Exception { ->>>>>>> 6472e7b... Now really adding the renamed files! - if (currentAccount != null) { - // verify this is the same account!!! 
- for (CloudStackUser user : currentAccount.getUser()) { - if (user.getSecretkey() != null && user.getSecretkey().equalsIgnoreCase(UserContext.current().getSecretKey())) { - return currentAccount; - } - } - } - // otherwise let's find this user/account - List accounts = getApi().listAccounts(null, null, null, null, null, null, null, null); - for (CloudStackAccount account : accounts) { - CloudStackUser[] users = account.getUser(); - for (CloudStackUser user : users) { - String userSecretKey = user.getSecretkey(); - if (userSecretKey != null && userSecretKey.equalsIgnoreCase(UserContext.current().getSecretKey())) { - currentAccount = account; - return account; - } - } - } - // if we get here, there is something wrong... - return null; - } - - /** - * List networkOfferings by zone with securityGroup enabled - * - * @param zoneId - * @return - * @throws Exception - */ - private CloudStackNetwork getNetworksWithSecurityGroupEnabled(String zoneId) throws Exception { - List networks = getApi().listNetworks(null, null, null, null, null, null, null, null, null, zoneId); - List netWithSecGroup = new ArrayList(); - for (CloudStackNetwork network : networks ) { - if (!network.getNetworkOfferingAvailability().equalsIgnoreCase("unavailable") && network.getSecurityGroupEnabled()) - netWithSecGroup.add(network); - } - // we'll take the first one - return netWithSecGroup.get(0); - } - - /** - * Create a network - * - * @param zoneId - * @param offering - * @param owner - * @return - * @throws Exception - */ -<<<<<<< HEAD - private CloudStackNetwork createDefaultGuestNetwork(String zoneId, CloudStackNetworkOffering offering, CloudStackAccount owner) throws Exception { - return getApi().createNetwork(owner.getName() + "-network", owner.getName() + "-network", offering.getId(), zoneId, owner.getName(), - owner.getDomainId(), true, null, null, null, null, null, null, null, null); -======= - private CloudStackNetwork createNetwork(String zoneId, CloudStackNetworkOffering offering, 
CloudStackAccount owner) throws Exception { - return getApi().createNetwork(owner.getName() + "-network", owner.getName() + "-network", offering.getId(), zoneId, owner.getName(), - null, null, null, null, null, null, null, null, null, null); ->>>>>>> 6472e7b... Now really adding the renamed files! - } - - /** - * List of networks without securityGroup enabled by zone - * - * @param zoneId - * @return - * @throws Exception - */ - private CloudStackNetwork getNetworksWithoutSecurityGroupEnabled(String zoneId) throws Exception { - // grab current account - CloudStackAccount caller = getCurrentAccount(); -<<<<<<< HEAD - - //check if account has any networks in the system - List networks = getApi().listNetworks(caller.getName(), caller.getDomainId(), null, true, null, null, null, null, null, zoneId); - - //listRequired offerings in the system - the network created from this offering has to be specified in deployVm command - List reuquiredOfferings = getApi().listNetworkOfferings("Required", null, null, null, true, null, null, null, null, null, zoneId); - if (reuquiredOfferings != null && !reuquiredOfferings.isEmpty()) { - if (networks != null && !networks.isEmpty()) { - //pick up the first required network from the network list - for (CloudStackNetwork network : networks) { - for (CloudStackNetworkOffering requiredOffering : reuquiredOfferings) { - logger.debug("[reqd/virtual} offering: " + requiredOffering.getId() + " network " + network.getNetworkOfferingId()); - if (network.getNetworkOfferingId().equals(requiredOffering.getId())) { - return network; - } - } - } - } else { - //create new network and return it - return createDefaultGuestNetwork(zoneId, reuquiredOfferings.get(0), caller); - } - } else { - //find all optional network offerings in the system - List optionalOfferings = getApi().listNetworkOfferings("Optional", null, null, null, true, null, null, null, null, null, zoneId); - if (optionalOfferings != null && !optionalOfferings.isEmpty()) { - if (networks != 
null && !networks.isEmpty()) { - for (CloudStackNetwork network : networks) { - for (CloudStackNetworkOffering optionalOffering : optionalOfferings) { - logger.debug("[optional] offering: " + optionalOffering.getId() + " network " + network.getNetworkOfferingId()); - if (network.getNetworkOfferingId().equals(optionalOffering.getId())) { - return network; - } - } - } - } - } - } - -======= - - List networks = getApi().listNetworks(null, caller.getDomainId(), null, null, null, null, null, null, null, zoneId); - - List offerings = getApi().listNetworkOfferings("Required", null, null, null, true, null, null, null, null, null, zoneId); - if (offerings != null && !offerings.isEmpty()) { - for (CloudStackNetwork network : networks) - for (CloudStackNetworkOffering offering : offerings) { - logger.debug("[reqd/virtual} offering: " + offering.getId() + " network " + network.getNetworkOfferingId()); - if (network.getNetworkOfferingId().equals(offering.getId())) - return network; - } - // if we get this far, we didn't find a network, so create one and return it. - return createNetwork(zoneId, offerings.get(0), caller); - } - offerings = getApi().listNetworkOfferings("Optional", null, null, null, true, null, null, null, null, null, zoneId); - if (offerings != null && !offerings.isEmpty()) { - for (CloudStackNetwork network : networks) - for (CloudStackNetworkOffering offering : offerings) { - logger.debug("[optional] offering: " + offering.getId() + " network " + network.getNetworkOfferingId()); - if (network.getNetworkOfferingId().equals(offering.getId())) - return network; - } - } ->>>>>>> 6472e7b... Now really adding the renamed files! 
- // if we get this far and haven't returned already return an error - throw new EC2ServiceException(ServerError.InternalError, "Unable to find an appropriate network for account " + caller.getName()); - } - - /** - * Find a suitable network to use for deployVM - * - * @param zone - * @return - * @throws Exception - */ - private CloudStackNetwork findNetwork(CloudStackZone zone) throws Exception { - if (zone == null) return null; - - // for basic networking, we don't specify a networkid for deployvm - if (zone.getNetworkType().equalsIgnoreCase("basic")) return null; - - if (zone.getSecurityGroupsEnabled()) { - // find system security group enabled network - return getNetworksWithSecurityGroupEnabled(zone.getId()); - - } else { - return getNetworksWithoutSecurityGroupEnabled(zone.getId()); - } - } - - private CloudStackZone findZone() throws Exception { - CloudStackAccount caller = getCurrentAccount(); - // caller.getDomainId doesn't work in user mode -// List cloudZones = getApi().listZones(true, caller.getDomainId(), null, null); - List cloudZones = getApi().listZones(true, null, null, null); - if (cloudZones != null && cloudZones.size() > 0) { - return cloudZones.get(0); - } - return null; - } - - /** - * Windows has its own device strings. 
- * - * @param hypervisor - * @param deviceId - * @return - */ - public String cloudDeviceIdToDevicePath( String hypervisor, String deviceId ) - { - Integer devId = new Integer(deviceId); - if (null != hypervisor && hypervisor.toLowerCase().contains( "windows" )) { - switch( devId ) { - case 1: return "xvdb"; - case 2: return "xvdc"; - case 3: return "xvdd"; - case 4: return "xvde"; - case 5: return "xvdf"; - case 6: return "xvdg"; - case 7: return "xvdh"; - case 8: return "xvdi"; - case 9: return "xvdj"; - default: return new String( "" + deviceId ); - } - } else { // -> assume its unix - switch( devId ) { - case 1: return "/dev/sdb"; - case 2: return "/dev/sdc"; - case 3: return "/dev/sdd"; - case 4: return "/dev/sde"; - case 5: return "/dev/sdf"; - case 6: return "/dev/sdg"; - case 7: return "/dev/sdh"; - case 8: return "/dev/sdi"; - case 9: return "/dev/sdj"; - default: return new String( "" + deviceId ); - } - } - } - - - /** - * Translate the device name string into a Cloud Stack deviceId. 
- * deviceId 3 is reserved for CDROM and 0 for the ROOT disk - * - * @param device string - * @return deviceId value - */ - private String mapDeviceToCloudDeviceId( String device ) - { - if (device.equalsIgnoreCase( "/dev/sdb" )) return "1"; - else if (device.equalsIgnoreCase( "/dev/sdc" )) return "2"; - else if (device.equalsIgnoreCase( "/dev/sde" )) return "4"; - else if (device.equalsIgnoreCase( "/dev/sdf" )) return "5"; - else if (device.equalsIgnoreCase( "/dev/sdg" )) return "6"; - else if (device.equalsIgnoreCase( "/dev/sdh" )) return "7"; - else if (device.equalsIgnoreCase( "/dev/sdi" )) return "8"; - else if (device.equalsIgnoreCase( "/dev/sdj" )) return "9"; - - else if (device.equalsIgnoreCase( "/dev/xvdb" )) return "1"; - else if (device.equalsIgnoreCase( "/dev/xvdc" )) return "2"; - else if (device.equalsIgnoreCase( "/dev/xvde" )) return "4"; - else if (device.equalsIgnoreCase( "/dev/xvdf" )) return "5"; - else if (device.equalsIgnoreCase( "/dev/xvdg" )) return "6"; - else if (device.equalsIgnoreCase( "/dev/xvdh" )) return "7"; - else if (device.equalsIgnoreCase( "/dev/xvdi" )) return "8"; - else if (device.equalsIgnoreCase( "/dev/xvdj" )) return "9"; - - else if (device.equalsIgnoreCase( "xvdb" )) return "1"; - else if (device.equalsIgnoreCase( "xvdc" )) return "2"; - else if (device.equalsIgnoreCase( "xvde" )) return "4"; - else if (device.equalsIgnoreCase( "xvdf" )) return "5"; - else if (device.equalsIgnoreCase( "xvdg" )) return "6"; - else if (device.equalsIgnoreCase( "xvdh" )) return "7"; - else if (device.equalsIgnoreCase( "xvdi" )) return "8"; - else if (device.equalsIgnoreCase( "xvdj" )) return "9"; - - else throw new EC2ServiceException( ClientError.Unsupported, device + " is not supported" ); - } - - /** - * Map CloudStack instance state to Amazon state strings - * - * @param state - * @return - */ - private String mapToAmazonVolState( String state ) - { - if (state.equalsIgnoreCase( "Allocated" ) || - state.equalsIgnoreCase( "Creating" ) || 
- state.equalsIgnoreCase( "Ready" )) return "available"; - - if (state.equalsIgnoreCase( "Destroy" )) return "deleting"; - - return "error"; - } - - /** - * Stop an instance - * Wait until one specific VM has stopped - * - * @param instanceId - * @return - * @throws Exception - */ - private boolean stopVirtualMachine( String instanceId) throws Exception { - try { - CloudStackUserVm resp = getApi().stopVirtualMachine(instanceId, false); - if (logger.isDebugEnabled()) - logger.debug("Stopping VM " + instanceId ); - return resp != null; - } catch(Exception e) { - logger.error( "StopVirtualMachine - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); - } - } - - /** - * Start an existing stopped instance(VM) - * - * @param instanceId - * @return - * @throws Exception - */ - private boolean startVirtualMachine( String instanceId ) throws Exception { - try { - CloudStackUserVm resp = getApi().startVirtualMachine(instanceId); - if (logger.isDebugEnabled()) - logger.debug("Starting VM " + instanceId ); - return resp != null; - } catch(Exception e) { - logger.error("StartVirtualMachine - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); - } - } -} +/* + * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.cloud.bridge.service.core.ec2; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.security.SignatureException; +import java.sql.SQLException; +import java.text.ParseException; +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; +import java.util.UUID; + +import javax.xml.parsers.ParserConfigurationException; + +import org.apache.log4j.Logger; +import org.xml.sax.SAXException; + +import com.cloud.bridge.persist.dao.CloudStackSvcOfferingDao; +import com.cloud.bridge.persist.dao.OfferingDao; +import com.cloud.bridge.service.UserContext; +import com.cloud.bridge.service.exception.EC2ServiceException; +import com.cloud.bridge.service.exception.EC2ServiceException.ClientError; +import com.cloud.bridge.service.exception.EC2ServiceException.ServerError; +import com.cloud.bridge.util.ConfigurationHelper; +import com.cloud.stack.CloudStackApi; +import com.cloud.stack.models.CloudStackAccount; +import com.cloud.stack.models.CloudStackDiskOffering; +import com.cloud.stack.models.CloudStackInfoResponse; +import com.cloud.stack.models.CloudStackIngressRule; +import com.cloud.stack.models.CloudStackIpAddress; +import com.cloud.stack.models.CloudStackKeyPair; +import com.cloud.stack.models.CloudStackKeyValue; +import com.cloud.stack.models.CloudStackNetwork; +import com.cloud.stack.models.CloudStackNetworkOffering; +import com.cloud.stack.models.CloudStackNic; +import com.cloud.stack.models.CloudStackOsType; +import com.cloud.stack.models.CloudStackPasswordData; +import com.cloud.stack.models.CloudStackResourceLimit; +import com.cloud.stack.models.CloudStackSecurityGroup; +import com.cloud.stack.models.CloudStackSecurityGroupIngress; +import com.cloud.stack.models.CloudStackServiceOffering; +import com.cloud.stack.models.CloudStackSnapshot; +import com.cloud.stack.models.CloudStackTemplate; +import 
com.cloud.stack.models.CloudStackUser;
+import com.cloud.stack.models.CloudStackUserVm;
+import com.cloud.stack.models.CloudStackVolume;
+import com.cloud.stack.models.CloudStackZone;
+
+/**
+ * EC2Engine processes the ec2 commands and calls their cloudstack analogs
+ *
+ */
+public class EC2Engine {
+    protected final static Logger logger = Logger.getLogger(EC2Engine.class);
+    String managementServer = null;
+    String cloudAPIPort = null;
+
+    private CloudStackApi _eng = null;
+
+    private CloudStackAccount currentAccount = null;
+
+    public EC2Engine() throws IOException {
+        loadConfigValues();
+    }
+
+    /**
+     * Which management server do we talk to?
+     * Load a mapping from Amazon values for 'instanceType' to cloud defined
+     * diskOfferingId and serviceOfferingId.
+     *
+     * @throws IOException
+     */
+    private void loadConfigValues() throws IOException {
+        File propertiesFile = ConfigurationHelper.findConfigurationFile("ec2-service.properties");
+        if (null != propertiesFile) {
+            logger.info("Use EC2 properties file: " + propertiesFile.getAbsolutePath());
+            Properties EC2Prop = new Properties();
+            try {
+                EC2Prop.load( new FileInputStream( propertiesFile ));
+            } catch (FileNotFoundException e) {
+                logger.warn("Unable to open properties file: " + propertiesFile.getAbsolutePath(), e);
+            } catch (IOException e) {
+                logger.warn("Unable to read properties file: " + propertiesFile.getAbsolutePath(), e);
+            }
+            managementServer = EC2Prop.getProperty( "managementServer" );
+            cloudAPIPort = EC2Prop.getProperty( "cloudAPIPort", null );
+
+            OfferingDao ofDao = new OfferingDao();
+            try {
+                if(ofDao.getOfferingCount() == 0) {
+                    String strValue = EC2Prop.getProperty("m1.small.serviceId");
+                    if(strValue != null) ofDao.setOfferMapping("m1.small", strValue);
+
+                    strValue = EC2Prop.getProperty("m1.large.serviceId");
+                    if(strValue != null) ofDao.setOfferMapping("m1.large", strValue);
+
+                    strValue = EC2Prop.getProperty("m1.xlarge.serviceId");
+                    if(strValue != null)
ofDao.setOfferMapping("m1.xlarge", strValue); + + strValue = EC2Prop.getProperty("c1.medium.serviceId"); + if(strValue != null) ofDao.setOfferMapping("c1.medium", strValue); + + strValue = EC2Prop.getProperty("c1.xlarge.serviceId"); + if(strValue != null) ofDao.setOfferMapping("c1.xlarge", strValue); + + strValue = EC2Prop.getProperty("m2.xlarge.serviceId"); + if(strValue != null) ofDao.setOfferMapping("m2.xlarge", strValue); + + strValue = EC2Prop.getProperty("m2.2xlarge.serviceId"); + if(strValue != null) ofDao.setOfferMapping("m2.2xlarge", strValue); + + strValue = EC2Prop.getProperty("m2.4xlarge.serviceId"); + if(strValue != null) ofDao.setOfferMapping("m2.4xlarge", strValue); + + strValue = EC2Prop.getProperty("cc1.4xlarge.serviceId"); + if(strValue != null) ofDao.setOfferMapping("cc1.4xlarge", strValue); + } + } catch(Exception e) { + logger.error("Unexpected exception ", e); + } + } else logger.error( "ec2-service.properties not found" ); + } + + /** + * Helper function to manage the api connection + * + * @return + */ + private CloudStackApi getApi() { + if (_eng == null) { + _eng = new CloudStackApi(managementServer, cloudAPIPort, false); + } + // regardless of whether _eng is initialized, we must make sure + // access/secret keys are current with what's in the UserCredentials + _eng.setApiKey(UserContext.current().getAccessKey()); + _eng.setSecretKey(UserContext.current().getSecretKey()); + return _eng; + } + + + /** + * Verifies account can access CloudStack + * + * @param accessKey + * @param secretKey + * @return + * @throws EC2ServiceException + */ + public boolean validateAccount( String accessKey, String secretKey ) throws EC2ServiceException { + String oldApiKey = null; + String oldSecretKey = null; + + if (accessKey == null || secretKey == null) { + return false; + } + + // okay, instead of using the getApi() nonsense for validate, we are going to manage _eng + if (_eng == null) { + _eng = new CloudStackApi(managementServer, cloudAPIPort, false); 
+ } + + try { + oldApiKey = _eng.getApiKey(); + oldSecretKey = _eng.getSecretKey(); + } catch(Exception e) { + // we really don't care, and expect this + } + try { + _eng.setApiKey(accessKey); + _eng.setSecretKey(secretKey); + List accts = _eng.listAccounts(null, null, null, null, null, null, null, null); + if (oldApiKey != null && oldSecretKey != null) { + _eng.setApiKey(oldApiKey); + _eng.setSecretKey(oldSecretKey); + } + if (accts == null) { + return false; + } + return true; + } catch(Exception e) { + logger.error("Validate account failed!"); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + } + } + + /** + * Creates a security group + * + * @param groupName + * @param groupDesc + * @return + */ + public Boolean createSecurityGroup(String groupName, String groupDesc) { + try { + CloudStackSecurityGroup grp = getApi().createSecurityGroup(groupName, null, groupDesc, null); + if (grp != null && grp.getId() != null) { + return true; + } + return false; + } catch( Exception e ) { + logger.error( "EC2 CreateSecurityGroup - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + } + } + + /** + * Deletes a security group + * + * @param groupName + * @return + */ + public boolean deleteSecurityGroup(String groupName) { + try { + CloudStackInfoResponse resp = getApi().deleteSecurityGroup(null, null, null, groupName); + if (resp != null) { + return resp.getSuccess(); + } + return false; + } catch( Exception e ) { + logger.error( "EC2 DeleteSecurityGroup - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + } + } + + /** + * returns a list of security groups + * + * @param request + * @return + */ + public EC2DescribeSecurityGroupsResponse describeSecurityGroups(EC2DescribeSecurityGroups request) + { + try { + EC2DescribeSecurityGroupsResponse response = listSecurityGroups( request.getGroupSet()); + EC2GroupFilterSet gfs = request.getFilterSet(); + + if ( null == gfs ) + return 
response; + else return gfs.evaluate( response ); + } catch( Exception e ) { + logger.error( "EC2 DescribeSecurityGroups - ", e); + throw new EC2ServiceException(ServerError.InternalError, "An unexpected error occurred."); + } + } + + /** + * CloudStack supports revoke only by using the ruleid of the ingress rule. + * We list all security groups and find the matching group and use the first ruleId we find. + * + * @param request + * @return + */ + public boolean revokeSecurityGroup( EC2AuthorizeRevokeSecurityGroup request ) + { + if (null == request.getName()) throw new EC2ServiceException(ServerError.InternalError, "Name is a required parameter"); + try { + String[] groupSet = new String[1]; + groupSet[0] = request.getName(); + String ruleId = null; + + EC2IpPermission[] items = request.getIpPermissionSet(); + + EC2DescribeSecurityGroupsResponse response = listSecurityGroups( groupSet ); + EC2SecurityGroup[] groups = response.getGroupSet(); + + for (EC2SecurityGroup group : groups) { + EC2IpPermission[] perms = group.getIpPermissionSet(); + for (EC2IpPermission perm : perms) { + ruleId = doesRuleMatch( items[0], perm ); + } + } + + if (null == ruleId) + throw new EC2ServiceException(ClientError.InvalidGroup_NotFound, "Cannot find matching ruleid."); + + CloudStackInfoResponse resp = getApi().revokeSecurityGroupIngress(ruleId); + if (resp != null && resp.getId() != null) { + return resp.getSuccess(); + } + return false; + } catch( Exception e ) { + logger.error( "EC2 revokeSecurityGroupIngress" + " - " + e.getMessage()); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + } + } + + /** + * authorizeSecurityGroup + * + * @param request - ip permission parameters + */ + public boolean authorizeSecurityGroup(EC2AuthorizeRevokeSecurityGroup request ) + { + if (null == request.getName()) throw new EC2ServiceException(ServerError.InternalError, "Name is a required parameter"); + + EC2IpPermission[] items = request.getIpPermissionSet(); + + try { 
+ for (EC2IpPermission ipPerm : items) { + EC2SecurityGroup[] groups = ipPerm.getUserSet(); + + List secGroupList = new ArrayList(); + for (EC2SecurityGroup group : groups) { + CloudStackKeyValue pair = new CloudStackKeyValue(); + pair.setKeyValue(group.getAccount(), group.getName()); + secGroupList.add(pair); + } + CloudStackSecurityGroupIngress resp = null; + if (ipPerm.getProtocol().equalsIgnoreCase("icmp")) { + resp = getApi().authorizeSecurityGroupIngress(null, constructCIDRList(ipPerm.getIpRangeSet()), null, null, + ipPerm.getToPort().toString(), ipPerm.getFromPort().toString(), ipPerm.getProtocol(), null, + request.getName(), null, secGroupList); + } else { + resp = getApi().authorizeSecurityGroupIngress(null, constructCIDRList(ipPerm.getIpRangeSet()), null, + ipPerm.getToPort().longValue(), null, null, ipPerm.getProtocol(), null, request.getName(), + ipPerm.getFromPort().longValue(), secGroupList); + } + if (resp != null && resp.getRuleId() != null) { + return true; + } + return false; + } + } catch(Exception e) { + logger.error( "EC2 AuthorizeSecurityGroupIngress - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + } + return true; + } + + /** + * Does the permission from the request (left) match the permission from the cloudStack query (right). + * If the cloudStack rule matches then we return its ruleId. 
+ * + * @param permLeft + * @param permRight + * @return ruleId of the cloudstack rule + */ + private String doesRuleMatch(EC2IpPermission permLeft, EC2IpPermission permRight) + { + int matches = 0; + + if (null != permLeft.getIcmpType() && null != permLeft.getIcmpCode()) { + if (null == permRight.getIcmpType() || null == permRight.getIcmpCode()) return null; + + if (!permLeft.getIcmpType().equalsIgnoreCase( permRight.getIcmpType())) return null; + if (!permLeft.getIcmpCode().equalsIgnoreCase( permRight.getIcmpCode())) return null; + matches++; + } + + // -> "Valid Values for EC2 security groups: tcp | udp | icmp or the corresponding protocol number (6 | 17 | 1)." + if (null != permLeft.getProtocol()) { + if (null == permRight.getProtocol()) return null; + + String protocol = permLeft.getProtocol(); + if (protocol.equals( "6" )) protocol = "tcp"; + else if (protocol.equals( "17" )) protocol = "udp"; + else if (protocol.equals( "1" )) protocol = "icmp"; + + if (!protocol.equalsIgnoreCase( permRight.getProtocol())) return null; + matches++; + } + + + if (null != permLeft.getCIDR()) { + if (null == permRight.getCIDR()) return null; + + if (!permLeft.getCIDR().equalsIgnoreCase( permRight.getCIDR())) return null; + matches++; + } + + // -> is the port(s) from the request (left) a match of the rule's port(s) + if (0 != permLeft.getFromPort()) { + // -> -1 means all ports match + if (-1 != permLeft.getFromPort()) { + if (permLeft.getFromPort().compareTo(permRight.getFromPort()) != 0 || + permLeft.getToPort().compareTo(permRight.getToPort()) != 0) + return null; + } + matches++; + } + + + // -> was permLeft set up properly with at least one property to match? + if ( 0 == matches ) + return null; + else return permRight.getRuleId(); + } + + + /** + * Cloud Stack API takes a comma separated list of IP ranges as one parameter. 
+ * + * @throws UnsupportedEncodingException + */ + private String constructCIDRList( String[] ipRanges ) throws UnsupportedEncodingException + { + if (null == ipRanges || 0 == ipRanges.length) return null; + StringBuffer cidrList = new StringBuffer(); + + for( int i=0; i < ipRanges.length; i++ ) { + if (0 < i) cidrList.append( "," ); + cidrList.append( ipRanges[i] ); + } + return cidrList.toString(); + } + + /** + * Returns a list of all snapshots + * + * @param request + * @return + */ + public EC2DescribeSnapshotsResponse handleRequest( EC2DescribeSnapshots request ) + { + EC2DescribeVolumesResponse volumes = new EC2DescribeVolumesResponse(); + EC2SnapshotFilterSet sfs = request.getFilterSet(); + + try { + // -> query to get the volume size for each snapshot + EC2DescribeSnapshotsResponse response = listSnapshots( request.getSnapshotSet()); + if (response == null) { + return new EC2DescribeSnapshotsResponse(); + } + EC2Snapshot[] snapshots = response.getSnapshotSet(); + for (EC2Snapshot snap : snapshots) { + volumes = listVolumes(snap.getVolumeId(), null, volumes); + EC2Volume[] volSet = volumes.getVolumeSet(); + if (0 < volSet.length) snap.setVolumeSize(volSet[0].getSize()); + volumes.reset(); + } + + if ( null == sfs ) + return response; + else return sfs.evaluate( response ); + } catch( EC2ServiceException error ) { + logger.error( "EC2 DescribeSnapshots - ", error); + throw error; + + } catch( Exception e ) { + logger.error( "EC2 DescribeSnapshots - ", e); + throw new EC2ServiceException(ServerError.InternalError, "An unexpected error occurred."); + } + } + + /** + * Creates a snapshot + * + * @param volumeId + * @return + */ + public EC2Snapshot createSnapshot( String volumeId ) { + try { + + CloudStackSnapshot snap = getApi().createSnapshot(volumeId, null, null, null); + if (snap == null) { + throw new EC2ServiceException(ServerError.InternalError, "Unable to create snapshot!"); + } + EC2Snapshot ec2Snapshot = new EC2Snapshot(); + + 
ec2Snapshot.setId(snap.getId()); + ec2Snapshot.setName(snap.getName()); + ec2Snapshot.setType(snap.getSnapshotType()); + ec2Snapshot.setAccountName(snap.getAccountName()); + ec2Snapshot.setDomainId(snap.getDomainId()); + ec2Snapshot.setCreated(snap.getCreated()); + ec2Snapshot.setVolumeId(snap.getVolumeId()); + + List vols = getApi().listVolumes(null, null, null, snap.getVolumeId(), null, null, null, null, null, null, null); + + if(vols.size() > 0) { + assert(vols.get(0).getSize() != null); + Long sizeInGB = vols.get(0).getSize().longValue()/1073741824; + ec2Snapshot.setVolumeSize(sizeInGB); + } + + return ec2Snapshot; + } catch( Exception e ) { + logger.error( "EC2 CreateSnapshot - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + } + } + + /** + * Deletes a snapshot + * + * @param snapshotId + * @return + */ + public boolean deleteSnapshot(String snapshotId) { + try { + + CloudStackInfoResponse resp = getApi().deleteSnapshot(snapshotId); + if(resp != null) { + return resp.getSuccess(); + } + + return false; + } catch(Exception e) { + logger.error( "EC2 DeleteSnapshot - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? 
e.getMessage() : "An unexpected error occurred."); + } + } + + /** + * Modify an existing template + * + * @param request + * @return + */ + public boolean modifyImageAttribute( EC2Image request ) + { + // TODO: This is incomplete + EC2DescribeImagesResponse images = new EC2DescribeImagesResponse(); + + try { + images = listTemplates( request.getId(), images ); + EC2Image[] imageSet = images.getImageSet(); + + CloudStackTemplate resp = getApi().updateTemplate(request.getId(), null, request.getDescription(), null, imageSet[0].getName(), null, null); + if (resp != null) { + return true; + } + return false; + } catch( Exception e ) { + logger.error( "EC2 ModifyImage - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + } + } + + /** + * If given a specific list of snapshots of interest, then only values from those snapshots are returned. + * + * @param interestedShots - can be null, should be a subset of all snapshots + */ + private EC2DescribeSnapshotsResponse listSnapshots( String[] interestedShots ) throws Exception { + EC2DescribeSnapshotsResponse snapshots = new EC2DescribeSnapshotsResponse(); + + List cloudSnaps; + if (interestedShots == null || interestedShots.length == 0) { + cloudSnaps = getApi().listSnapshots(null, null, null, null, null, null, null, null, null); + } else { + cloudSnaps = new ArrayList(); + + for(String id : interestedShots) { + List tmpList = getApi().listSnapshots(null, null, id, null, null, null, null, null, null); + cloudSnaps.addAll(tmpList); + } + } + + if (cloudSnaps == null) { + return null; + } + + for(CloudStackSnapshot cloudSnapshot : cloudSnaps) { + EC2Snapshot shot = new EC2Snapshot(); + shot.setId(cloudSnapshot.getId()); + shot.setName(cloudSnapshot.getName()); + shot.setVolumeId(cloudSnapshot.getVolumeId()); + shot.setType(cloudSnapshot.getSnapshotType()); + shot.setState(cloudSnapshot.getState()); + shot.setCreated(cloudSnapshot.getCreated()); + 
shot.setAccountName(cloudSnapshot.getAccountName());
+            shot.setDomainId(cloudSnapshot.getDomainId());
+
+            snapshots.addSnapshot(shot);
+        }
+        return snapshots;
+    }
+
+
+    // handlers
+    /**
+     * return password data from the instance
+     *
+     * @param instanceId
+     * @return
+     */
+    public EC2PasswordData getPasswordData(String instanceId) {
+        try {
+            CloudStackPasswordData resp = getApi().getVMPassword(instanceId);
+            EC2PasswordData passwdData = new EC2PasswordData();
+            if (resp != null) {
+                passwdData.setInstanceId(instanceId);
+                passwdData.setEncryptedPassword(resp.getEncryptedpassword());
+            }
+            return passwdData;
+        } catch(Exception e) {
+            logger.error("EC2 GetPasswordData - ", e);
+            throw new EC2ServiceException(ServerError.InternalError, e.getMessage());
+        }
+    }
+    /**
+     * Lists SSH KeyPairs on the system
+     *
+     * @param request
+     * @return
+     */
+    public EC2DescribeKeyPairsResponse describeKeyPairs( EC2DescribeKeyPairs request ) {
+        try {
+            EC2KeyPairFilterSet filterSet = request.getKeyFilterSet();
+            String[] keyNames = request.getKeyNames();
+            List keyPairs = getApi().listSSHKeyPairs(null, null, null);
+            List keyPairsList = new ArrayList();
+
+            if (keyPairs != null) {
+                // Let's trim the list of keypairs to only the ones listed in keyNames
+                if (keyNames != null && keyNames.length > 0) {
+                    for (CloudStackKeyPair keyPair : keyPairs) {
+                        boolean matched = false;
+                        for (String keyName : keyNames) {
+                            if (keyPair.getName().contains(keyName)) {
+                                matched = true;
+                                break;
+                            }
+                        }
+                        if (matched == false) {
+                            keyPairs.remove(keyPair);
+                        }
+                    }
+                }
+
+                if (keyPairs.isEmpty() == true) {
+                    throw new EC2ServiceException(ServerError.InternalError, "No keypairs left!");
+                }
+
+                // this should be reworked...
converting from CloudStackKeyPairResponse to EC2SSHKeyPair is dumb + for (CloudStackKeyPair respKeyPair: keyPairs) { + EC2SSHKeyPair ec2KeyPair = new EC2SSHKeyPair(); + ec2KeyPair.setFingerprint(respKeyPair.getFingerprint()); + ec2KeyPair.setKeyName(respKeyPair.getName()); + ec2KeyPair.setPrivateKey(respKeyPair.getPrivatekey()); + keyPairsList.add(ec2KeyPair); + } + } + return filterSet.evaluate(keyPairsList); + } catch(Exception e) { + logger.error("EC2 DescribeKeyPairs - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + } + } + + /** + * Delete SSHKeyPair + * + * @param request + * @return + */ + public boolean deleteKeyPair( EC2DeleteKeyPair request ) { + try { + CloudStackInfoResponse resp = getApi().deleteSSHKeyPair(request.getKeyName(), null, null); + if (resp == null) { + throw new Exception("Ivalid CloudStack API response"); + } + + return resp.getSuccess(); + } catch(Exception e) { + logger.error("EC2 DeleteKeyPair - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + } + } + + /** + * Create SSHKeyPair + * + * @param request + * @return + */ + public EC2SSHKeyPair createKeyPair(EC2CreateKeyPair request) { + try { + CloudStackKeyPair resp = getApi().createSSHKeyPair(request.getKeyName(), null, null); + if (resp == null) { + throw new Exception("Ivalid CloudStack API response"); + } + + EC2SSHKeyPair response = new EC2SSHKeyPair(); + response.setFingerprint(resp.getFingerprint()); + response.setKeyName(resp.getName()); + response.setPrivateKey(resp.getPrivatekey()); + + return response; + } catch (Exception e) { + logger.error("EC2 CreateKeyPair - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + } + } + + /** + * Import an existing SSH KeyPair + * + * @param request + * @return + */ + public EC2SSHKeyPair importKeyPair( EC2ImportKeyPair request ) { + try { + CloudStackKeyPair resp = getApi().registerSSHKeyPair(request.getKeyName(), 
request.getPublicKeyMaterial()); + if (resp == null) { + throw new Exception("Ivalid CloudStack API response"); + } + + EC2SSHKeyPair response = new EC2SSHKeyPair(); + response.setFingerprint(resp.getFingerprint()); + response.setKeyName(resp.getName()); + response.setPrivateKey(resp.getPrivatekey()); + + return response; + } catch (Exception e) { + logger.error("EC2 ImportKeyPair - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + } + } + + /** + * list ip addresses that have been allocated + * + * @param request + * @return + */ + public EC2DescribeAddressesResponse describeAddresses( EC2DescribeAddresses request ) { + try { + List addrList = getApi().listPublicIpAddresses(null, null, null, null, null, null, null, null, null); + + EC2AddressFilterSet filterSet = request.getFilterSet(); + List addressList = new ArrayList(); + if (addrList != null && addrList.size() > 0) { + for (CloudStackIpAddress addr: addrList) { + // remember, if no filters are set, request.inPublicIpSet always returns true + if (request.inPublicIpSet(addr.getIpAddress())) { + EC2Address ec2Address = new EC2Address(); + ec2Address.setIpAddress(addr.getIpAddress()); + if (addr.getVirtualMachineId() != null) + ec2Address.setAssociatedInstanceId(addr.getVirtualMachineId().toString()); + addressList.add(ec2Address); + } + } + } + + return filterSet.evaluate(addressList); + } catch(Exception e) { + logger.error("EC2 DescribeAddresses - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + } + } + + /** + * release an IP Address + * + * @param request + * @return + */ + public boolean releaseAddress(EC2ReleaseAddress request) { + try { + CloudStackIpAddress cloudIp = getApi().listPublicIpAddresses(null, null, null, null, null, request.getPublicIp(), null, null, null).get(0); + CloudStackInfoResponse resp = getApi().disassociateIpAddress(cloudIp.getId()); + if (resp != null) { + return resp.getSuccess(); + } + } catch(Exception e) { + 
logger.error("EC2 ReleaseAddress - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + } + return false; + } + + /** + * Associate an address with an instance + * + * @param request + * @return + */ + public boolean associateAddress( EC2AssociateAddress request ) { + try { + CloudStackIpAddress cloudIp = getApi().listPublicIpAddresses(null, null, null, null, null, request.getPublicIp(), null, null, null).get(0); + CloudStackUserVm cloudVm = getApi().listVirtualMachines(null, null, null, null, null, null, request.getInstanceId(), null, null, null, null, null, null, null, null).get(0); + + CloudStackInfoResponse resp = getApi().enableStaticNat(cloudIp.getId(), cloudVm.getId()); + if (resp != null) { + return resp.getSuccess(); + } + } catch(Exception e) { + logger.error( "EC2 AssociateAddress - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); + } + return false; + } + + /** + * Disassociate an address from an instance + * + * @param request + * @return + */ + public boolean disassociateAddress( EC2DisassociateAddress request ) { + try { + CloudStackIpAddress cloudIp = getApi().listPublicIpAddresses(null, null, null, null, null, request.getPublicIp(), null, null, null).get(0); + CloudStackInfoResponse resp = getApi().disassociateIpAddress(cloudIp.getId()); + if (resp != null) { + return resp.getSuccess(); + } + } catch(Exception e) { + logger.error( "EC2 DisassociateAddress - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? 
e.getMessage() : "An unexpected error occurred."); + } + return false; + } + + /** + * Allocate an address + * + * @param request + * @return + */ + public EC2Address allocateAddress() + { + try { + EC2Address ec2Address = new EC2Address(); + // this gets our networkId + CloudStackAccount caller = getCurrentAccount(); + + CloudStackZone zone = findZone(); + CloudStackNetwork net = findNetwork(zone); +// CloudStackIpAddress resp = getApi().associateIpAddress(null, null, null, "0036952d-48df-4422-9fd0-94b0885e18cb"); + CloudStackIpAddress resp = getApi().associateIpAddress(null, null, null, net.getId()); + ec2Address.setAssociatedInstanceId(resp.getId()); + if (resp.getIpAddress() == null) { + List addrList = getApi().listPublicIpAddresses(null, null, null, null, null, null, null, null, null); + if (addrList != null && addrList.size() > 0) { + for (CloudStackIpAddress addr: addrList) { + if (addr.getId().equalsIgnoreCase(resp.getId())) { + ec2Address.setIpAddress(addr.getIpAddress()); + } + } + } + } else { + ec2Address.setIpAddress(resp.getIpAddress()); + } + + return ec2Address; + } catch(Exception e) { + logger.error( "EC2 AllocateAddress - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); + } + } + + /** + * List of templates available. We only support the imageSet version of this call or when no search parameters are passed + * which results in asking for all templates. 
+     *
+     * @param request
+     * @return
+     */
+    public EC2DescribeImagesResponse describeImages(EC2DescribeImages request)
+    {
+        EC2DescribeImagesResponse images = new EC2DescribeImagesResponse();
+
+        try {
+            String[] templateIds = request.getImageSet();
+
+            if ( 0 == templateIds.length ) {
+                return listTemplates(null, images);
+            }
+            for (String s : templateIds) {
+                images = listTemplates(s, images);
+            }
+            return images;
+
+        } catch( Exception e ) {
+            logger.error( "EC2 DescribeImages - ", e);
+            throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred.");
+        }
+    }
+
+    /**
+     * Create a template
+     * Amazon API just gives us the instanceId to create the template from.
+     * But our createTemplate function requires the volumeId and osTypeId.
+     * So to get that we must make the following sequence of cloud API calls:
+     * 1) listVolumes&virtualMachineId= -- gets the volumeId
+     * 2) listVirtualMachines&id= -- gets the templateId
+     * 3) listTemplates&id= -- gets the osTypeId
+     *
+     * If we have to start and stop the VM in question then this function is
+     * going to take a long time to complete.
+ * + * @param request + * @return + */ + public EC2CreateImageResponse createImage(EC2CreateImage request) + { + EC2CreateImageResponse response = null; + boolean needsRestart = false; + String volumeId = null; + + try { + // [A] Creating a template from a VM volume should be from the ROOT volume + // Also for this to work the VM must be in a Stopped state so we 'reboot' it if its not + EC2DescribeVolumesResponse volumes = new EC2DescribeVolumesResponse(); + volumes = listVolumes( null, request.getInstanceId(), volumes ); + EC2Volume[] volSet = volumes.getVolumeSet(); + for (EC2Volume vol : volSet) { + if (vol.getType().equalsIgnoreCase( "ROOT" )) { + String vmState = vol.getVMState(); + if (vmState.equalsIgnoreCase( "running" ) || vmState.equalsIgnoreCase( "starting" )) { + needsRestart = true; + if (!stopVirtualMachine( request.getInstanceId() )) + throw new EC2ServiceException(ClientError.IncorrectState, "CreateImage - instance must be in a stopped state"); + } + volumeId = vol.getId(); + break; + } + } + + // [B] The parameters must be in sorted order for proper signature generation + EC2DescribeInstancesResponse instances = new EC2DescribeInstancesResponse(); + instances = lookupInstances( request.getInstanceId(), instances ); + EC2Instance[] instanceSet = instances.getInstanceSet(); + String templateId = instanceSet[0].getTemplateId(); + + EC2DescribeImagesResponse images = new EC2DescribeImagesResponse(); + images = listTemplates( templateId, images ); + EC2Image[] imageSet = images.getImageSet(); + String osTypeId = imageSet[0].getOsTypeId(); + + CloudStackTemplate resp = getApi().createTemplate((request.getDescription() == null ? 
"" : request.getDescription()), request.getName(), + osTypeId, null, null, null, null, null, null, volumeId); + if (resp == null || resp.getId() == null) { + throw new EC2ServiceException(ServerError.InternalError, "An upexpected error occurred."); + } + + //if template was created succesfully, create the new image response + response = new EC2CreateImageResponse(); + response.setId(resp.getId()); + + // [C] If we stopped the virtual machine now we need to restart it + if (needsRestart) { + if (!startVirtualMachine( request.getInstanceId() )) + throw new EC2ServiceException(ServerError.InternalError, + "CreateImage - restarting instance " + request.getInstanceId() + " failed"); + } + return response; + + } catch( Exception e ) { + logger.error( "EC2 CreateImage - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); + } + } + + /** + * Register a template + * + * @param request + * @return + */ + public EC2CreateImageResponse registerImage(EC2RegisterImage request) + { + try { + CloudStackAccount caller = getCurrentAccount(); + if (null == request.getFormat() || null == request.getName() || null == request.getOsTypeName() || + null == request.getLocation() || null == request.getZoneName()) + throw new EC2ServiceException(ServerError.InternalError, "Missing parameter - location/architecture/name"); + + List templates = getApi().registerTemplate((request.getDescription() == null ? request.getName() : request.getDescription()), + request.getFormat(), request.getHypervisor(), request.getName(), toOSTypeId(request.getOsTypeName()), request.getLocation(), + toZoneId(request.getZoneName(), null), null, null, null, null, null, null, null, null, null); + if (templates != null) { + // technically we will only ever register a single template... 
+                for (CloudStackTemplate template : templates) {
+                    if (template != null && template.getId() != null) {
+                        EC2CreateImageResponse image = new EC2CreateImageResponse();
+                        image.setId(template.getId().toString());
+                        return image;
+                    }
+                }
+            }
+            return null;
+        } catch( Exception e ) {
+            logger.error( "EC2 RegisterImage - ", e);
+            throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred.");
+        }
+    }
+
+    /**
+     * Deregister a template(image)
+     * Our implementation is different from Amazon in that we do delete the template
+     * when we deregister it. The cloud API has no deregister call.
+     *
+     * @param image
+     * @return
+     */
+    public boolean deregisterImage( EC2Image image )
+    {
+        try {
+            CloudStackInfoResponse resp = getApi().deleteTemplate(image.getId(), null);
+            return resp.getSuccess();
+        } catch( Exception e ) {
+            logger.error( "EC2 DeregisterImage - ", e);
+            throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred.");
+        }
+    }
+
+    /**
+     * list instances
+     *
+     * @param request
+     * @return
+     */
+    public EC2DescribeInstancesResponse describeInstances(EC2DescribeInstances request ) {
+        try {
+            return listVirtualMachines( request.getInstancesSet(), request.getFilterSet());
+        } catch( Exception e ) {
+            logger.error( "EC2 DescribeInstances - " ,e);
+            throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ?
e.getMessage() : "An unexpected error occurred."); + } + } + + /** + * list Zones + * + * @param request + * @return + */ + public EC2DescribeAvailabilityZonesResponse handleRequest(EC2DescribeAvailabilityZones request) { + try { + CloudStackAccount caller = getCurrentAccount(); + + return listZones(request.getZoneSet(), null); + + } catch( EC2ServiceException error ) { + logger.error( "EC2 DescribeAvailabilityZones - ", error); + throw error; + + } catch( Exception e ) { + logger.error( "EC2 DescribeAvailabilityZones - " ,e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); + } + } + + /** + * list volumes + * + * @param request + * @return + */ + public EC2DescribeVolumesResponse handleRequest( EC2DescribeVolumes request ) { + EC2DescribeVolumesResponse volumes = new EC2DescribeVolumesResponse(); + EC2VolumeFilterSet vfs = request.getFilterSet(); + + try { + String[] volumeIds = request.getVolumeSet(); + if ( 0 == volumeIds.length ){ + volumes = listVolumes( null, null, volumes ); + } else { + for (String s : volumeIds) + volumes = listVolumes(s, null, volumes ); + } + + if ( null == vfs ) + return volumes; + else return vfs.evaluate( volumes ); + } catch( Exception e ) { + logger.error( "EC2 DescribeVolumes - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? 
e.getMessage() : "An unexpected error occurred."); + } + } + + /** + * Attach a volume to an instance + * + * @param request + * @return + */ + public EC2Volume attachVolume( EC2Volume request ) { + try { + request.setDeviceId(mapDeviceToCloudDeviceId(request.getDevice())); + EC2Volume resp = new EC2Volume(); + + CloudStackVolume vol = getApi().attachVolume(request.getId(), request.getInstanceId(), request.getDeviceId()); + if(vol != null) { + resp.setAttached(vol.getAttached()); + resp.setCreated(vol.getCreated()); + resp.setDevice(request.getDevice()); + resp.setDeviceId(vol.getDeviceId()); + resp.setHypervisor(vol.getHypervisor()); + resp.setId(vol.getId()); + resp.setInstanceId(vol.getVirtualMachineId()); + resp.setSize(vol.getSize()); + resp.setSnapshotId(vol.getSnapshotId()); + resp.setState(vol.getState()); + resp.setType(vol.getVolumeType()); + resp.setVMState(vol.getVirtualMachineState()); + resp.setZoneName(vol.getZoneName()); + return resp; + } + throw new EC2ServiceException( ServerError.InternalError, "An unexpected error occurred." ); + } catch( Exception e ) { + logger.error( "EC2 AttachVolume 2 - ", e); + throw new EC2ServiceException( ServerError.InternalError, e.getMessage() != null ? 
e.getMessage() : e.toString()); + } + } + + /** + * Detach a volume from an instance + * + * @param request + * @return + */ + public EC2Volume detachVolume(EC2Volume request) { + try { + CloudStackVolume vol = getApi().detachVolume(null, request.getId(), null); + EC2Volume resp = new EC2Volume(); + + if(vol != null) { + resp.setAttached(vol.getAttached()); + resp.setCreated(vol.getCreated()); + resp.setDevice(request.getDevice()); + resp.setDeviceId(vol.getDeviceId()); + resp.setHypervisor(vol.getHypervisor()); + resp.setId(vol.getId()); + resp.setInstanceId(vol.getVirtualMachineId()); + resp.setSize(vol.getSize()); + resp.setSnapshotId(vol.getSnapshotId()); + resp.setState(vol.getState()); + resp.setType(vol.getVolumeType()); + resp.setVMState(vol.getVirtualMachineState()); + resp.setZoneName(vol.getZoneName()); + return resp; + } + + throw new EC2ServiceException( ServerError.InternalError, "An unexpected error occurred." ); + } catch( Exception e ) { + logger.error( "EC2 DetachVolume - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? 
e.getMessage() : "An unexpected error occurred."); + } + } + + /** + * Create a volume + * + * @param request + * @return + */ + public EC2Volume createVolume( EC2CreateVolume request ) { + try { + + CloudStackAccount caller = getCurrentAccount(); + // -> put either snapshotid or diskofferingid on the request + String snapshotId = request.getSnapshotId(); + Long size = request.getSize(); + String diskOfferingId = null; + + if (snapshotId == null) { + List disks = getApi().listDiskOfferings(null, null, null, null); + for (CloudStackDiskOffering offer : disks) { + if (offer.isCustomized()) { + diskOfferingId = offer.getId(); + } + } + if (diskOfferingId == null) throw new EC2ServiceException(ServerError.InternalError, "No Customize Disk Offering Found"); + } + +// // -> no volume name is given in the Amazon request but is required in the cloud API + CloudStackVolume vol = getApi().createVolume(UUID.randomUUID().toString(), null, diskOfferingId, null, size, snapshotId, toZoneId(request.getZoneName(), null)); + if (vol != null) { + EC2Volume resp = new EC2Volume(); + resp.setAttached(vol.getAttached()); + resp.setCreated(vol.getCreated()); +// resp.setDevice(); + resp.setDeviceId(vol.getDeviceId()); + resp.setHypervisor(vol.getHypervisor()); + resp.setId(vol.getId()); + resp.setInstanceId(vol.getVirtualMachineId()); + resp.setSize(vol.getSize()); + resp.setSnapshotId(vol.getSnapshotId()); + resp.setState(vol.getState()); + resp.setType(vol.getVolumeType()); + resp.setVMState(vol.getVirtualMachineState()); + resp.setZoneName(vol.getZoneName()); + return resp; + } + return null; + } catch( Exception e ) { + logger.error( "EC2 CreateVolume - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? 
e.getMessage() : "An unexpected error occurred."); + } + } + + /** + * Delete a volume + * + * @param request + * @return + */ + public EC2Volume deleteVolume( EC2Volume request ) { + try { + CloudStackInfoResponse resp = getApi().deleteVolume(request.getId()); + if(resp != null) { + request.setState("deleted"); + return request; + } + + throw new EC2ServiceException(ServerError.InternalError, "An unexpected error occurred."); + } catch( Exception e ) { + logger.error( "EC2 DeleteVolume 2 - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); + } + } + + /** + * Reboot an instance or instances + * + * @param request + * @return + */ + public boolean rebootInstances(EC2RebootInstances request) + { + EC2Instance[] vms = null; + + // -> reboot is not allowed on destroyed (i.e., terminated) instances + try { + String[] instanceSet = request.getInstancesSet(); + EC2DescribeInstancesResponse previousState = listVirtualMachines( instanceSet, null ); + vms = previousState.getInstanceSet(); + + // -> send reboot requests for each found VM + for (EC2Instance vm : vms) { + if (vm.getState().equalsIgnoreCase( "Destroyed" )) continue; + + CloudStackUserVm resp = getApi().rebootVirtualMachine(vm.getId()); + if (logger.isDebugEnabled()) + logger.debug("Rebooting VM " + resp.getId() + " job " + resp.getJobId()); + } + + // -> if some specified VMs where not found we have to tell the caller + if (instanceSet.length != vms.length) + throw new EC2ServiceException(ClientError.InvalidAMIID_NotFound, "One or more instanceIds do not exist, other instances rebooted."); + + return true; + } catch( Exception e ) { + logger.error( "EC2 RebootInstances - ", e ); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? 
e.getMessage() : "An unexpected error occurred."); + } + } + + /** + * Using a template (AMI), launch n instances + * + * @param request + * @return + */ + public EC2RunInstancesResponse runInstances(EC2RunInstances request) { + EC2RunInstancesResponse instances = new EC2RunInstancesResponse(); + int createInstances = 0; + int canCreateInstances = -1; + int countCreated = 0; + + try { + CloudStackAccount caller = getCurrentAccount(); + + // ugly... + canCreateInstances = calculateAllowedInstances(); + if (-1 == canCreateInstances) canCreateInstances = request.getMaxCount(); + + if (canCreateInstances < request.getMinCount()) { + logger.info( "EC2 RunInstances - min count too big (" + request.getMinCount() + "), " + canCreateInstances + " left to allocate"); + throw new EC2ServiceException(ClientError.InstanceLimitExceeded ,"Only " + canCreateInstances + " instance(s) left to allocate"); + } + + if ( canCreateInstances < request.getMaxCount()) + createInstances = canCreateInstances; + else + createInstances = request.getMaxCount(); + + //find CS service Offering ID + String instanceType = "m1.small"; + if(request.getInstanceType() != null){ + instanceType = request.getInstanceType(); + } + CloudStackServiceOffering svcOffering = getCSServiceOfferingId(instanceType); + if(svcOffering == null){ + logger.info("No ServiceOffering found to be defined by name, please contact the administrator "+instanceType ); + throw new EC2ServiceException(ClientError.Unsupported, "instanceType: [" + instanceType + "] not found!"); + } + + // zone stuff + String zoneId = toZoneId(request.getZoneName(), null); + + List zones = getApi().listZones(null, null, zoneId, null); + if (zones == null || zones.size() == 0) { + logger.info("EC2 RunInstances - zone [" + request.getZoneName() + "] not found!"); + throw new EC2ServiceException(ClientError.InvalidZone_NotFound, "ZoneId [" + request.getZoneName() + "] not found!"); + } + // we choose first zone? 
+ CloudStackZone zone = zones.get(0); + + // network + CloudStackNetwork network = findNetwork(zone); + + // now actually deploy the vms + for( int i=0; i < createInstances; i++ ) { + CloudStackUserVm resp = getApi().deployVirtualMachine(svcOffering.getId(), + request.getTemplateId(), zoneId, null, null, null, null, + null, null, null, request.getKeyName(), null, (network != null ? network.getId() : null), + null, null, request.getSize().longValue(), request.getUserData()); + EC2Instance vm = new EC2Instance(); + vm.setId(resp.getId().toString()); + vm.setName(resp.getName()); + vm.setZoneName(resp.getZoneName()); + vm.setTemplateId(resp.getTemplateId().toString()); + if (resp.getSecurityGroupList() != null && resp.getSecurityGroupList().size() > 0) { + // TODO, we have a list of security groups, just return the first one? + CloudStackSecurityGroup securityGroup = resp.getSecurityGroupList().get(0); + vm.setGroup(securityGroup.getName()); + } + vm.setState(resp.getState()); + vm.setCreated(resp.getCreated()); + vm.setIpAddress(resp.getIpAddress()); + vm.setAccountName(resp.getAccountName()); + vm.setDomainId(resp.getDomainId()); + vm.setHypervisor(resp.getHypervisor()); + vm.setServiceOffering( svcOffering.getName()); + instances.addInstance(vm); + countCreated++; + } + + if (0 == countCreated) { + // TODO, we actually need to destroy left-over VMs when the exception is thrown + throw new EC2ServiceException(ServerError.InsufficientInstanceCapacity, "Insufficient Instance Capacity" ); + } + + return instances; + } catch( Exception e ) { + logger.error( "EC2 RunInstances - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? 
e.getMessage() : "An unexpected error occurred."); + } + } + + /** + * Start an instance or instances + * + * @param request + * @return + */ + public EC2StartInstancesResponse startInstances(EC2StartInstances request) { + EC2StartInstancesResponse instances = new EC2StartInstancesResponse(); + EC2Instance[] vms = null; + + // -> first determine the current state of each VM (becomes it previous state) + try { + EC2DescribeInstancesResponse previousState = listVirtualMachines( request.getInstancesSet(), null ); + vms = previousState.getInstanceSet(); + + // -> send start requests for each item + for (EC2Instance vm : vms) { + vm.setPreviousState(vm.getState()); + + // -> if its already running then we don't care + if (vm.getState().equalsIgnoreCase( "Running" ) || vm.getState().equalsIgnoreCase( "Destroyed" )) continue; + + CloudStackUserVm resp = getApi().startVirtualMachine(vm.getId()); + if(resp != null){ + vm.setState(resp.getState()); + if(logger.isDebugEnabled()) + logger.debug("Starting VM " + vm.getId() + " job " + resp.getJobId()); + } + instances.addInstance(vm); + } + return instances; + } catch( Exception e ) { + logger.error( "EC2 StartInstances - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? 
e.getMessage() : "An unexpected error occurred."); + } + } + + /** + * Stop an instance or instances + * + * @param request + * @return + */ + public EC2StopInstancesResponse stopInstances(EC2StopInstances request) { + EC2StopInstancesResponse instances = new EC2StopInstancesResponse(); + EC2Instance[] virtualMachines = null; + + // -> first determine the current state of each VM (becomes it previous state) + try { + String[] instanceSet = request.getInstancesSet(); + + EC2DescribeInstancesResponse previousState = listVirtualMachines( instanceSet, null ); + virtualMachines = previousState.getInstanceSet(); + + // -> send stop requests for each item + for (EC2Instance vm : virtualMachines) { + vm.setPreviousState( vm.getState()); + CloudStackUserVm resp = null; + if (request.getDestroyInstances()) { + if (vm.getState().equalsIgnoreCase( "Destroyed" )) continue; + resp = getApi().destroyVirtualMachine(vm.getId()); + if(logger.isDebugEnabled()) + logger.debug("Destroying VM " + vm.getId() + " job " + resp.getJobId()); + } else { + if (vm.getState().equalsIgnoreCase("Stopped") || vm.getState().equalsIgnoreCase("Destroyed")) continue; + resp = getApi().stopVirtualMachine(vm.getId(), false); + if(logger.isDebugEnabled()) + logger.debug("Stopping VM " + vm.getId() + " job " + resp.getJobId()); + } + if (resp != null) { + vm.setState(resp.getState()); + instances.addInstance(vm); + } + } + return instances; + } catch( Exception e ) { + logger.error( "EC2 StopInstances - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); + } + } + + /** + * RunInstances includes a min and max count of requested instances to create. + * We have to be able to create the min number for the user or none at all. So + * here we determine what the user has left to create. + * + * @return -1 means no limit exists, other positive numbers give max number left that + * the user can create. 
+ */ + private int calculateAllowedInstances() throws Exception { + int maxAllowed = -1; + + CloudStackAccount ourAccount = getCurrentAccount(); + + if (ourAccount == null) { + // This should never happen, but + // we will return -99999 if this happens... + return -99999; + } + + // if accountType is Admin == 1, then let's return -1 + if (ourAccount.getAccountType() == 1) return -1; + + // -> get the user limits on instances + // "0" represents instances: + // http://download.cloud.com/releases/2.2.0/api_2.2.8/user/listResourceLimits.html + List limits = getApi().listResourceLimits(null, null, null, null, "0"); + if (limits != null && limits.size() > 0) { + maxAllowed = (int)limits.get(0).getMax().longValue(); + if (maxAllowed == -1) + return -1; // no limit + + EC2DescribeInstancesResponse existingVMS = listVirtualMachines( null, null ); + EC2Instance[] vmsList = existingVMS.getInstanceSet(); + return (maxAllowed - vmsList.length); + } else { + return 0; + } + } + + /** + * Performs the cloud API listVirtualMachines one or more times. + * + * @param virtualMachineIds - an array of instances we are interested in getting information on + * @param ifs - filter out unwanted instances + */ + private EC2DescribeInstancesResponse listVirtualMachines( String[] virtualMachineIds, EC2InstanceFilterSet ifs ) throws Exception + { + EC2DescribeInstancesResponse instances = new EC2DescribeInstancesResponse(); + + if (null == virtualMachineIds || 0 == virtualMachineIds.length) { + instances = lookupInstances( null, instances ); + } else { + for( int i=0; i < virtualMachineIds.length; i++ ) { + instances = lookupInstances( virtualMachineIds[i], instances ); + } + } + + if ( null == ifs ) + return instances; + else return ifs.evaluate( instances ); + } + + /** + * Get one or more templates depending on the volumeId parameter. 
+ * + * @param volumeId - if interested in one specific volume, null if want to list all volumes + * @param instanceId - if interested in volumes for a specific instance, null if instance is not important + */ + private EC2DescribeVolumesResponse listVolumes(String volumeId, String instanceId, EC2DescribeVolumesResponse volumes)throws Exception { + + List vols = getApi().listVolumes(null, null, null, volumeId, null, null, null, null, null, instanceId, null); + if(vols != null && vols.size() > 0) { + for(CloudStackVolume vol : vols) { + EC2Volume ec2Vol = new EC2Volume(); + ec2Vol.setId(vol.getId()); + if(vol.getAttached() != null) + ec2Vol.setAttached(vol.getAttached()); + ec2Vol.setCreated(vol.getCreated()); + + if(vol.getDeviceId() != null) + ec2Vol.setDeviceId(vol.getDeviceId()); + ec2Vol.setHypervisor(vol.getHypervisor()); + + if(vol.getSnapshotId() != null) + ec2Vol.setSnapshotId(vol.getSnapshotId()); + ec2Vol.setState(mapToAmazonVolState(vol.getState())); + ec2Vol.setSize(vol.getSize()); + ec2Vol.setType(vol.getVolumeType()); + + if(vol.getVirtualMachineId() != null) + ec2Vol.setInstanceId(vol.getVirtualMachineId()); + + if(vol.getVirtualMachineState() != null) + ec2Vol.setVMState(vol.getVirtualMachineState()); + ec2Vol.setZoneName(vol.getZoneName()); + + volumes.addVolume(ec2Vol); + } + } + + return volumes; + } + + /** + * Translate the given zone name into the required zoneId. Query for + * a list of all zones and match the zone name given. Amazon uses zone + * names while the Cloud API often requires the zoneId. 
+ * + * @param zoneName - (e.g., 'AH'), if null return the first zone in the available list + * + * @return the zoneId that matches the given zone name + */ + private String toZoneId(String zoneName, String domainId) throws Exception { + EC2DescribeAvailabilityZonesResponse zones = null; + String[] interestedZones = null; + + if ( null != zoneName) { + interestedZones = new String[1]; + interestedZones[0] = zoneName; + } + zones = listZones(interestedZones, domainId); + + if (zones == null || zones.getZoneIdAt( 0 ) == null) + throw new EC2ServiceException(ClientError.InvalidParameterValue, "Unknown zoneName value - " + zoneName); + return zones.getZoneIdAt(0); + } + + + /** + * Convert from the Amazon instanceType strings to Cloud serviceOfferingId + * + */ + + private CloudStackServiceOffering getCSServiceOfferingId(String instanceType){ + try { + if (null == instanceType) instanceType = "m1.small"; + + CloudStackSvcOfferingDao dao = new CloudStackSvcOfferingDao(); + return dao.getSvcOfferingByName(instanceType); + + } catch(Exception e) { + logger.error( "Error while retrieving ServiceOffering information by name - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + } + } + + /** + * Convert from the Cloud serviceOfferingId to the Amazon instanceType strings based + * on the loaded map. 
+ * + * @param serviceOfferingId + * @return A valid value for the Amazon defined instanceType + * @throws SQLException, ClassNotFoundException, IllegalAccessException, InstantiationException + */ + private String serviceOfferingIdToInstanceType( String serviceOfferingId ){ + try{ + CloudStackSvcOfferingDao dao = new CloudStackSvcOfferingDao(); + CloudStackServiceOffering offering = dao.getSvcOfferingById(serviceOfferingId); + if(offering == null){ + logger.warn( "No instanceType match for serviceOfferingId: [" + serviceOfferingId + "]" ); + return "m1.small"; + } + return offering.getName(); + } + catch(Exception e) { + logger.error( "sError while retrieving ServiceOffering information by id - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + } + } + + /** + * Match the value in the 'description' field of the listOsTypes response to get + * the osTypeId. + * + * @param osTypeName + * @return the Cloud.com API osTypeId + */ + private String toOSTypeId( String osTypeName ) throws Exception { + try { + List osTypes = getApi().listOsTypes(null, null, null); + for (CloudStackOsType osType : osTypes) { + if (osType.getDescription().toLowerCase().indexOf(osTypeName.toLowerCase()) != -1) + return osType.getId(); + } + return null; + } catch(Exception e) { + logger.error( "List OS Types - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + } + + } + + /** + * More than one place we need to access the defined list of zones. If given a specific + * list of zones of interest, then only values from those zones are returned. 
+ * + * @param interestedZones - can be null, should be a subset of all zones + * + * @return EC2DescribeAvailabilityZonesResponse + */ + private EC2DescribeAvailabilityZonesResponse listZones(String[] interestedZones, String domainId) throws Exception + { + EC2DescribeAvailabilityZonesResponse zones = new EC2DescribeAvailabilityZonesResponse(); + + List cloudZones = getApi().listZones(true, domainId, null, null); + + if(cloudZones != null) { + for(CloudStackZone cloudZone : cloudZones) { + if ( null != interestedZones && 0 < interestedZones.length ) { + for( int j=0; j < interestedZones.length; j++ ) { + if (interestedZones[j].equalsIgnoreCase( cloudZone.getName())) { + zones.addZone(cloudZone.getId().toString(), cloudZone.getName()); + break; + } + } + } else { + zones.addZone(cloudZone.getId().toString(), cloudZone.getName()); + } + } + } + return zones; + } + + + /** + * Get information on one or more virtual machines depending on the instanceId parameter. + * + * @param instanceId - if null then return information on all existing instances, otherwise + * just return information on the matching instance. + * @param instances - a container object to fill with one or more EC2Instance objects + * + * @return the same object passed in as the "instances" parameter modified with one or more + * EC2Instance objects loaded. + */ + private EC2DescribeInstancesResponse lookupInstances( String instanceId, EC2DescribeInstancesResponse instances ) + throws Exception { + + String instId = instanceId != null ? 
instanceId : null; + List vms = getApi().listVirtualMachines(null, null, null, null, null, null, + instId, null, null, null, null, null, null, null, null); + + if(vms != null && vms.size() > 0) { + for(CloudStackUserVm cloudVm : vms) { + EC2Instance ec2Vm = new EC2Instance(); + + ec2Vm.setId(cloudVm.getId().toString()); + ec2Vm.setName(cloudVm.getName()); + ec2Vm.setZoneName(cloudVm.getZoneName()); + ec2Vm.setTemplateId(cloudVm.getTemplateId().toString()); + ec2Vm.setGroup(cloudVm.getGroup()); + ec2Vm.setState(cloudVm.getState()); + ec2Vm.setCreated(cloudVm.getCreated()); + ec2Vm.setIpAddress(cloudVm.getIpAddress()); + ec2Vm.setAccountName(cloudVm.getAccountName()); + ec2Vm.setDomainId(cloudVm.getDomainId()); + ec2Vm.setHypervisor(cloudVm.getHypervisor()); + ec2Vm.setRootDeviceType(cloudVm.getRootDeviceType()); + ec2Vm.setRootDeviceId(cloudVm.getRootDeviceId()); + ec2Vm.setServiceOffering(serviceOfferingIdToInstanceType(cloudVm.getServiceOfferingId().toString())); + + List nics = cloudVm.getNics(); + for(CloudStackNic nic : nics) { + if(nic.getIsDefault()) { + ec2Vm.setPrivateIpAddress(nic.getIpaddress()); + break; + } + } + instances.addInstance(ec2Vm); + } + } + return instances; + } + + + /** + * Get one or more templates depending on the templateId parameter. + * + * @param templateId - if null then return information on all existing templates, otherwise + * just return information on the matching template. + * @param images - a container object to fill with one or more EC2Image objects + * + * @return the same object passed in as the "images" parameter modified with one or more + * EC2Image objects loaded. 
+ */ + private EC2DescribeImagesResponse listTemplates( String templateId, EC2DescribeImagesResponse images ) throws EC2ServiceException { + try { + List result = new ArrayList(); + + if(templateId != null){ + List template = getApi().listTemplates("executable", null, null, null, templateId , null, null, null); + if(template != null){ + result.addAll(template); + } + }else{ + List selfExecutable = getApi().listTemplates("selfexecutable", null, null, null, null, null, null, null); + if(selfExecutable != null){ + result.addAll(selfExecutable); + } + + List featured = getApi().listTemplates("featured", null, null, null, null, null, null, null); + if(featured != null){ + result.addAll(featured); + } + + List sharedExecutable = getApi().listTemplates("sharedexecutable", null, null, null, null, null, null, null); + if(sharedExecutable != null){ + result.addAll(sharedExecutable); + } + + List community = getApi().listTemplates("community", null, null, null, null, null, null, null); + if(community != null){ + result.addAll(community); + } + } + + if (result != null && result.size() > 0) { + for (CloudStackTemplate temp : result) { + EC2Image ec2Image = new EC2Image(); + ec2Image.setId(temp.getId().toString()); + ec2Image.setAccountName(temp.getAccount()); + ec2Image.setName(temp.getName()); + ec2Image.setDescription(temp.getDisplayText()); + ec2Image.setOsTypeId(temp.getOsTypeId().toString()); + ec2Image.setIsPublic(temp.getIsPublic()); + ec2Image.setIsReady(temp.getIsReady()); + ec2Image.setDomainId(temp.getDomainId()); + images.addImage(ec2Image); + } + } + return images; + } catch(Exception e) { + logger.error( "List Templates - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + } + } + + /** + * List security groups + * + * @param interestedGroups + * @return + * @throws EC2ServiceException + * @throws UnsupportedEncodingException + * @throws SignatureException + * @throws IOException + * @throws SAXException + * @throws 
ParserConfigurationException + * @throws ParseException + */ + public EC2DescribeSecurityGroupsResponse listSecurityGroups( String[] interestedGroups ) throws Exception { + try { + EC2DescribeSecurityGroupsResponse groupSet = new EC2DescribeSecurityGroupsResponse(); + + List groups = getApi().listSecurityGroups(null, null, null, null, null, null); + if (groups != null && groups.size() > 0) + for (CloudStackSecurityGroup group : groups) { + boolean matched = false; + if (interestedGroups.length > 0) { + for (String groupName :interestedGroups) { + if (groupName.equalsIgnoreCase(group.getName())) { + matched = true; + break; + } + } + } else { + matched = true; + } + if (!matched) continue; + EC2SecurityGroup ec2Group = new EC2SecurityGroup(); + // not sure if we should set both account and account name to accountname + ec2Group.setAccount(group.getAccountName()); + ec2Group.setAccountName(group.getAccountName()); + ec2Group.setName(group.getName()); + ec2Group.setDescription(group.getDescription()); + ec2Group.setDomainId(group.getDomainId()); + ec2Group.setId(group.getId().toString()); + toPermission(ec2Group, group); + + groupSet.addGroup(ec2Group); + } + return groupSet; + } catch(Exception e) { + logger.error( "List Security Groups - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + } + } + + /** + * Convert ingress rule to EC2IpPermission records + * + * @param response + * @param group + * @return + */ + private boolean toPermission(EC2SecurityGroup response, CloudStackSecurityGroup group ) { + List rules = group.getIngressRules(); + + if (rules == null || rules.isEmpty()) return false; + + for (CloudStackIngressRule rule : rules) { + EC2IpPermission perm = new EC2IpPermission(); + perm.setProtocol(rule.getProtocol()); + perm.setFromPort(rule.getStartPort()); + perm.setToPort(rule.getEndPort()); + perm.setRuleId(rule.getRuleId() != null ? 
rule.getRuleId().toString() : new String()); + perm.setIcmpCode(rule.getIcmpCode() != null ? rule.getIcmpCode().toString() : new String()); + perm.setIcmpType(rule.getIcmpType() != null ? rule.getIcmpType().toString() : new String()); + perm.setCIDR(rule.getCidr()); + perm.addIpRange(rule.getCidr()); + + if (rule.getAccountName() != null && rule.getSecurityGroupName() != null) { + EC2SecurityGroup newGroup = new EC2SecurityGroup(); + newGroup.setAccount(rule.getAccountName()); + newGroup.setName(rule.getSecurityGroupName()); + perm.addUser(newGroup); + } + response.addIpPermission(perm); + } + return true; + } + + /** + * Find the current account based on the SecretKey + * + * @return + * @throws Exception + */ + public CloudStackAccount getCurrentAccount() throws Exception { + if (currentAccount != null) { + // verify this is the same account!!! + for (CloudStackUser user : currentAccount.getUser()) { + if (user.getSecretkey() != null && user.getSecretkey().equalsIgnoreCase(UserContext.current().getSecretKey())) { + return currentAccount; + } + } + } + // otherwise let's find this user/account + List accounts = getApi().listAccounts(null, null, null, null, null, null, null, null); + for (CloudStackAccount account : accounts) { + CloudStackUser[] users = account.getUser(); + for (CloudStackUser user : users) { + String userSecretKey = user.getSecretkey(); + if (userSecretKey != null && userSecretKey.equalsIgnoreCase(UserContext.current().getSecretKey())) { + currentAccount = account; + return account; + } + } + } + // if we get here, there is something wrong... 
+ return null; + } + + /** + * List networkOfferings by zone with securityGroup enabled + * + * @param zoneId + * @return + * @throws Exception + */ + private CloudStackNetwork getNetworksWithSecurityGroupEnabled(String zoneId) throws Exception { + List networks = getApi().listNetworks(null, null, null, null, null, null, null, null, null, zoneId); + List netWithSecGroup = new ArrayList(); + for (CloudStackNetwork network : networks ) { + if (!network.getNetworkOfferingAvailability().equalsIgnoreCase("unavailable") && network.getSecurityGroupEnabled()) + netWithSecGroup.add(network); + } + // we'll take the first one + return netWithSecGroup.get(0); + } + + /** + * Create a network + * + * @param zoneId + * @param offering + * @param owner + * @return + * @throws Exception + */ + private CloudStackNetwork createDefaultGuestNetwork(String zoneId, CloudStackNetworkOffering offering, CloudStackAccount owner) throws Exception { + return getApi().createNetwork(owner.getName() + "-network", owner.getName() + "-network", offering.getId(), zoneId, owner.getName(), + owner.getDomainId(), true, null, null, null, null, null, null, null, null); + } + + /** + * List of networks without securityGroup enabled by zone + * + * @param zoneId + * @return + * @throws Exception + */ + private CloudStackNetwork getNetworksWithoutSecurityGroupEnabled(String zoneId) throws Exception { + // grab current account + CloudStackAccount caller = getCurrentAccount(); + + //check if account has any networks in the system + List networks = getApi().listNetworks(caller.getName(), caller.getDomainId(), null, true, null, null, null, null, null, zoneId); + + //listRequired offerings in the system - the network created from this offering has to be specified in deployVm command + List reuquiredOfferings = getApi().listNetworkOfferings("Required", null, null, null, true, null, null, null, null, null, zoneId); + if (reuquiredOfferings != null && !reuquiredOfferings.isEmpty()) { + if (networks != null && 
!networks.isEmpty()) { + //pick up the first required network from the network list + for (CloudStackNetwork network : networks) { + for (CloudStackNetworkOffering requiredOffering : reuquiredOfferings) { + logger.debug("[reqd/virtual} offering: " + requiredOffering.getId() + " network " + network.getNetworkOfferingId()); + if (network.getNetworkOfferingId().equals(requiredOffering.getId())) { + return network; + } + } + } + } else { + //create new network and return it + return createDefaultGuestNetwork(zoneId, reuquiredOfferings.get(0), caller); + } + } else { + //find all optional network offerings in the system + List optionalOfferings = getApi().listNetworkOfferings("Optional", null, null, null, true, null, null, null, null, null, zoneId); + if (optionalOfferings != null && !optionalOfferings.isEmpty()) { + if (networks != null && !networks.isEmpty()) { + for (CloudStackNetwork network : networks) { + for (CloudStackNetworkOffering optionalOffering : optionalOfferings) { + logger.debug("[optional] offering: " + optionalOffering.getId() + " network " + network.getNetworkOfferingId()); + if (network.getNetworkOfferingId().equals(optionalOffering.getId())) { + return network; + } + } + } + } + } + } + + // if we get this far and haven't returned already return an error + throw new EC2ServiceException(ServerError.InternalError, "Unable to find an appropriate network for account " + caller.getName()); + } + + /** + * Find a suitable network to use for deployVM + * + * @param zone + * @return + * @throws Exception + */ + private CloudStackNetwork findNetwork(CloudStackZone zone) throws Exception { + if (zone == null) return null; + + // for basic networking, we don't specify a networkid for deployvm + if (zone.getNetworkType().equalsIgnoreCase("basic")) return null; + + if (zone.getSecurityGroupsEnabled()) { + // find system security group enabled network + return getNetworksWithSecurityGroupEnabled(zone.getId()); + + } else { + return 
getNetworksWithoutSecurityGroupEnabled(zone.getId()); + } + } + + private CloudStackZone findZone() throws Exception { + CloudStackAccount caller = getCurrentAccount(); + // caller.getDomainId doesn't work in user mode +// List cloudZones = getApi().listZones(true, caller.getDomainId(), null, null); + List cloudZones = getApi().listZones(true, null, null, null); + if (cloudZones != null && cloudZones.size() > 0) { + return cloudZones.get(0); + } + return null; + } + + /** + * Windows has its own device strings. + * + * @param hypervisor + * @param deviceId + * @return + */ + public String cloudDeviceIdToDevicePath( String hypervisor, String deviceId ) + { + Integer devId = new Integer(deviceId); + if (null != hypervisor && hypervisor.toLowerCase().contains( "windows" )) { + switch( devId ) { + case 1: return "xvdb"; + case 2: return "xvdc"; + case 3: return "xvdd"; + case 4: return "xvde"; + case 5: return "xvdf"; + case 6: return "xvdg"; + case 7: return "xvdh"; + case 8: return "xvdi"; + case 9: return "xvdj"; + default: return new String( "" + deviceId ); + } + } else { // -> assume its unix + switch( devId ) { + case 1: return "/dev/sdb"; + case 2: return "/dev/sdc"; + case 3: return "/dev/sdd"; + case 4: return "/dev/sde"; + case 5: return "/dev/sdf"; + case 6: return "/dev/sdg"; + case 7: return "/dev/sdh"; + case 8: return "/dev/sdi"; + case 9: return "/dev/sdj"; + default: return new String( "" + deviceId ); + } + } + } + + + /** + * Translate the device name string into a Cloud Stack deviceId. 
+ * deviceId 3 is reserved for CDROM and 0 for the ROOT disk + * + * @param device string + * @return deviceId value + */ + private String mapDeviceToCloudDeviceId( String device ) + { + if (device.equalsIgnoreCase( "/dev/sdb" )) return "1"; + else if (device.equalsIgnoreCase( "/dev/sdc" )) return "2"; + else if (device.equalsIgnoreCase( "/dev/sde" )) return "4"; + else if (device.equalsIgnoreCase( "/dev/sdf" )) return "5"; + else if (device.equalsIgnoreCase( "/dev/sdg" )) return "6"; + else if (device.equalsIgnoreCase( "/dev/sdh" )) return "7"; + else if (device.equalsIgnoreCase( "/dev/sdi" )) return "8"; + else if (device.equalsIgnoreCase( "/dev/sdj" )) return "9"; + + else if (device.equalsIgnoreCase( "/dev/xvdb" )) return "1"; + else if (device.equalsIgnoreCase( "/dev/xvdc" )) return "2"; + else if (device.equalsIgnoreCase( "/dev/xvde" )) return "4"; + else if (device.equalsIgnoreCase( "/dev/xvdf" )) return "5"; + else if (device.equalsIgnoreCase( "/dev/xvdg" )) return "6"; + else if (device.equalsIgnoreCase( "/dev/xvdh" )) return "7"; + else if (device.equalsIgnoreCase( "/dev/xvdi" )) return "8"; + else if (device.equalsIgnoreCase( "/dev/xvdj" )) return "9"; + + else if (device.equalsIgnoreCase( "xvdb" )) return "1"; + else if (device.equalsIgnoreCase( "xvdc" )) return "2"; + else if (device.equalsIgnoreCase( "xvde" )) return "4"; + else if (device.equalsIgnoreCase( "xvdf" )) return "5"; + else if (device.equalsIgnoreCase( "xvdg" )) return "6"; + else if (device.equalsIgnoreCase( "xvdh" )) return "7"; + else if (device.equalsIgnoreCase( "xvdi" )) return "8"; + else if (device.equalsIgnoreCase( "xvdj" )) return "9"; + + else throw new EC2ServiceException( ClientError.Unsupported, device + " is not supported" ); + } + + /** + * Map CloudStack instance state to Amazon state strings + * + * @param state + * @return + */ + private String mapToAmazonVolState( String state ) + { + if (state.equalsIgnoreCase( "Allocated" ) || + state.equalsIgnoreCase( "Creating" ) || 
+ state.equalsIgnoreCase( "Ready" )) return "available"; + + if (state.equalsIgnoreCase( "Destroy" )) return "deleting"; + + return "error"; + } + + /** + * Stop an instance + * Wait until one specific VM has stopped + * + * @param instanceId + * @return + * @throws Exception + */ + private boolean stopVirtualMachine( String instanceId) throws Exception { + try { + CloudStackUserVm resp = getApi().stopVirtualMachine(instanceId, false); + if (logger.isDebugEnabled()) + logger.debug("Stopping VM " + instanceId ); + return resp != null; + } catch(Exception e) { + logger.error( "StopVirtualMachine - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); + } + } + + /** + * Start an existing stopped instance(VM) + * + * @param instanceId + * @return + * @throws Exception + */ + private boolean startVirtualMachine( String instanceId ) throws Exception { + try { + CloudStackUserVm resp = getApi().startVirtualMachine(instanceId); + if (logger.isDebugEnabled()) + logger.debug("Starting VM " + instanceId ); + return resp != null; + } catch(Exception e) { + logger.error("StartVirtualMachine - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? 
e.getMessage() : "An unexpected error occurred."); + } + } +} \ No newline at end of file diff --git a/awsapi/src/com/cloud/bridge/service/core/ec2/EC2InstanceFilterSet.java b/awsapi/src/com/cloud/bridge/service/core/ec2/EC2InstanceFilterSet.java index e0793c1b44d..8cd697c9b6f 100644 --- a/awsapi/src/com/cloud/bridge/service/core/ec2/EC2InstanceFilterSet.java +++ b/awsapi/src/com/cloud/bridge/service/core/ec2/EC2InstanceFilterSet.java @@ -44,12 +44,8 @@ public class EC2InstanceFilterSet { filterTypes.put( "instance-state-name", "string" ); filterTypes.put( "ip-address", "string" ); filterTypes.put( "owner-id", "string" ); -<<<<<<< HEAD filterTypes.put( "root-device-name", "string" ); filterTypes.put( "private-ip-address", "string" ); -======= - filterTypes.put( "root-device-name", "string" ); ->>>>>>> 6472e7b... Now really adding the renamed files! } diff --git a/awsapi/src/com/cloud/bridge/service/core/ec2/EC2RegisterImage.java b/awsapi/src/com/cloud/bridge/service/core/ec2/EC2RegisterImage.java index 0671e5ef1ac..2ba12a7f5f6 100644 --- a/awsapi/src/com/cloud/bridge/service/core/ec2/EC2RegisterImage.java +++ b/awsapi/src/com/cloud/bridge/service/core/ec2/EC2RegisterImage.java @@ -23,10 +23,7 @@ public class EC2RegisterImage { private String format; private String zoneName; private String osTypeName; -<<<<<<< HEAD private String hypervisor; -======= ->>>>>>> 6472e7b... Now really adding the renamed files! public EC2RegisterImage() { location = null; @@ -63,11 +60,7 @@ public class EC2RegisterImage { /** * We redefine the expected format of this field to be: -<<<<<<< HEAD * "format:zonename:ostypename:hypervisor" -======= - * "format:zonename:ostypename" ->>>>>>> 6472e7b... Now really adding the renamed files! * * @param param */ @@ -78,10 +71,7 @@ public class EC2RegisterImage { format = parts[0]; zoneName = parts[1]; osTypeName = parts[2]; -<<<<<<< HEAD hypervisor = parts[3]; -======= ->>>>>>> 6472e7b... Now really adding the renamed files! 
} } } @@ -97,11 +87,8 @@ public class EC2RegisterImage { public String getOsTypeName() { return this.osTypeName; } -<<<<<<< HEAD public String getHypervisor() { return hypervisor; } -======= ->>>>>>> 6472e7b... Now really adding the renamed files! } diff --git a/awsapi/src/com/cloud/bridge/service/core/ec2/EC2Snapshot.java b/awsapi/src/com/cloud/bridge/service/core/ec2/EC2Snapshot.java index d93ae3647f5..a03856d86fa 100644 --- a/awsapi/src/com/cloud/bridge/service/core/ec2/EC2Snapshot.java +++ b/awsapi/src/com/cloud/bridge/service/core/ec2/EC2Snapshot.java @@ -35,11 +35,7 @@ public class EC2Snapshot { id = null; name = null; volumeId = null; -<<<<<<< HEAD volumeSize = new Long(0); -======= - volumeSize = null; ->>>>>>> 6472e7b... Now really adding the renamed files! type = null; state = null; created = null; @@ -76,11 +72,7 @@ public class EC2Snapshot { } public Long getVolumeSize() { -<<<<<<< HEAD return this.volumeSize; -======= - return this.volumeSize; ->>>>>>> 6472e7b... Now really adding the renamed files! } public void setType( String type ) { diff --git a/awsapi/src/com/cloud/bridge/service/core/ec2/EC2VolumeFilterSet.java b/awsapi/src/com/cloud/bridge/service/core/ec2/EC2VolumeFilterSet.java index e830f8e8ae7..663544e0608 100644 --- a/awsapi/src/com/cloud/bridge/service/core/ec2/EC2VolumeFilterSet.java +++ b/awsapi/src/com/cloud/bridge/service/core/ec2/EC2VolumeFilterSet.java @@ -118,11 +118,7 @@ public class EC2VolumeFilterSet { else if (filterName.equalsIgnoreCase( "size" )) return containsLong(vol.getSize(), valueSet ); else if (filterName.equalsIgnoreCase( "snapshot-id" )) -<<<<<<< HEAD return containsString(String.valueOf(vol.getSnapshotId()), valueSet ); -======= - return containsString(vol.getSnapshotId().toString(), valueSet ); ->>>>>>> 6472e7b... Now really adding the renamed files! 
else if (filterName.equalsIgnoreCase( "status" )) return containsString(vol.getState(), valueSet ); else if (filterName.equalsIgnoreCase( "volume-id" )) @@ -132,11 +128,7 @@ public class EC2VolumeFilterSet { else if (filterName.equalsIgnoreCase( "attachment.device" )) return containsDevice(vol.getDeviceId(), valueSet ); else if (filterName.equalsIgnoreCase( "attachment.instance-id" )) -<<<<<<< HEAD return containsString(String.valueOf(vol.getInstanceId()), valueSet ); -======= - return containsString(vol.getInstanceId().toString(), valueSet ); ->>>>>>> 6472e7b... Now really adding the renamed files! else return false; } @@ -175,11 +167,8 @@ public class EC2VolumeFilterSet { private boolean containsDevice(String deviceId, String[] set ) { -<<<<<<< HEAD - if (deviceId == null) - return false; -======= ->>>>>>> 6472e7b... Now really adding the renamed files! + if (deviceId == null) + return false; Integer devId = new Integer(deviceId); for (String s : set) { switch( devId ) { diff --git a/awsapi/src/com/cloud/bridge/service/core/s3/S3Engine.java b/awsapi/src/com/cloud/bridge/service/core/s3/S3Engine.java index 2509e40c6ba..3846249c604 100644 --- a/awsapi/src/com/cloud/bridge/service/core/s3/S3Engine.java +++ b/awsapi/src/com/cloud/bridge/service/core/s3/S3Engine.java @@ -1,2362 +1,1873 @@ -/* - * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.cloud.bridge.service.core.s3; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Calendar; -import java.util.Date; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.ListIterator; -import java.util.Map; -import java.util.Random; -import java.util.Set; -import java.util.TimeZone; -import java.util.UUID; - -import javax.servlet.http.HttpServletResponse; - -import org.apache.log4j.Logger; -import org.hibernate.LockMode; -import org.hibernate.Session; -import org.json.simple.parser.ParseException; - -<<<<<<< HEAD -======= -import com.cloud.bridge.io.S3FileSystemBucketAdapter; ->>>>>>> 6472e7b... Now really adding the renamed files! -import com.cloud.bridge.model.MHost; -import com.cloud.bridge.model.MHostMount; -import com.cloud.bridge.model.SAcl; -import com.cloud.bridge.model.SBucket; -import com.cloud.bridge.model.SHost; -import com.cloud.bridge.model.SMeta; -import com.cloud.bridge.model.SObject; -import com.cloud.bridge.model.SObjectItem; -import com.cloud.bridge.persist.PersistContext; -import com.cloud.bridge.persist.dao.BucketPolicyDao; -import com.cloud.bridge.persist.dao.MHostDao; -import com.cloud.bridge.persist.dao.MHostMountDao; -import com.cloud.bridge.persist.dao.MultipartLoadDao; -import com.cloud.bridge.persist.dao.SAclDao; -import com.cloud.bridge.persist.dao.SBucketDao; -import com.cloud.bridge.persist.dao.SHostDao; -import com.cloud.bridge.persist.dao.SMetaDao; -import com.cloud.bridge.persist.dao.SObjectDao; -import com.cloud.bridge.persist.dao.SObjectItemDao; -<<<<<<< HEAD -import com.cloud.bridge.service.S3BucketAdapter; -import com.cloud.bridge.service.S3FileSystemBucketAdapter; -import com.cloud.bridge.service.ServiceProvider; -import com.cloud.bridge.service.UserContext; -======= -import com.cloud.bridge.service.S3Constants; -import 
com.cloud.bridge.service.UserContext; -import com.cloud.bridge.service.controller.s3.ServiceProvider; ->>>>>>> 6472e7b... Now really adding the renamed files! -import com.cloud.bridge.service.core.s3.S3BucketPolicy.PolicyAccess; -import com.cloud.bridge.service.core.s3.S3CopyObjectRequest.MetadataDirective; -import com.cloud.bridge.service.core.s3.S3PolicyAction.PolicyActions; -import com.cloud.bridge.service.core.s3.S3PolicyCondition.ConditionKeys; -import com.cloud.bridge.service.exception.HostNotMountedException; -import com.cloud.bridge.service.exception.InternalErrorException; -import com.cloud.bridge.service.exception.InvalidBucketName; -import com.cloud.bridge.service.exception.NoSuchObjectException; -import com.cloud.bridge.service.exception.ObjectAlreadyExistsException; -import com.cloud.bridge.service.exception.OutOfServiceException; -import com.cloud.bridge.service.exception.OutOfStorageException; -import com.cloud.bridge.service.exception.PermissionDeniedException; -import com.cloud.bridge.service.exception.UnsupportedException; -import com.cloud.bridge.util.DateHelper; -import com.cloud.bridge.util.PolicyParser; -import com.cloud.bridge.util.StringHelper; -<<<<<<< HEAD -import com.cloud.bridge.util.Tuple; - -/** - * @author Kelven Yang -======= -import com.cloud.bridge.util.OrderedPair; -import com.cloud.bridge.util.Triple; - -/** - * @author Kelven Yang, John Zucker - * The CRUD control actions to be invoked from S3BucketAction or S3ObjectAction. ->>>>>>> 6472e7b... Now really adding the renamed files! 
- */ -public class S3Engine { - protected final static Logger logger = Logger.getLogger(S3Engine.class); - - private final int LOCK_ACQUIRING_TIMEOUT_SECONDS = 10; // ten seconds - - private final Map bucketAdapters = new HashMap(); - - public S3Engine() { - bucketAdapters.put(SHost.STORAGE_HOST_TYPE_LOCAL, new S3FileSystemBucketAdapter()); - } - -<<<<<<< HEAD - /** - * We treat this simply as first a get and then a put of the object the user wants to copy. - */ -======= - - /** - * Return a S3CopyObjectResponse which represents an object being copied from source - * to destination bucket. - * Called from S3ObjectAction when copying an object. - * This can be treated as first a GET followed by a PUT of the object the user wants to copy. - */ - ->>>>>>> 6472e7b... Now really adding the renamed files! - public S3CopyObjectResponse handleRequest(S3CopyObjectRequest request) - { - S3CopyObjectResponse response = new S3CopyObjectResponse(); - - // [A] Get the object we want to copy - S3GetObjectRequest getRequest = new S3GetObjectRequest(); - getRequest.setBucketName(request.getSourceBucketName()); - getRequest.setKey(request.getSourceKey()); - getRequest.setVersion(request.getVersion()); - getRequest.setConditions( request.getConditions()); - - getRequest.setInlineData( true ); - getRequest.setReturnData( true ); - if ( MetadataDirective.COPY == request.getDirective()) - getRequest.setReturnMetadata( true ); - else getRequest.setReturnMetadata( false ); - - //-> before we do anything verify the permissions on a copy basis - String destinationBucketName = request.getDestinationBucketName(); - String destinationKeyName = request.getDestinationKey(); - S3PolicyContext context = new S3PolicyContext( PolicyActions.PutObject, destinationBucketName ); - context.setKeyName( destinationKeyName ); - context.setEvalParam( ConditionKeys.MetaData, request.getDirective().toString()); - context.setEvalParam( ConditionKeys.CopySource, "/" + request.getSourceBucketName() + "/" + 
request.getSourceKey()); - if (PolicyAccess.DENY == verifyPolicy( context )) - throw new PermissionDeniedException( "Access Denied - bucket policy DENY result" ); - - S3GetObjectResponse originalObject = handleRequest(getRequest); - int resultCode = originalObject.getResultCode(); - if (200 != resultCode) { - response.setResultCode( resultCode ); - response.setResultDescription( originalObject.getResultDescription()); - return response; - } - - response.setCopyVersion( originalObject.getVersion()); - - - // [B] Put the object into the destination bucket - S3PutObjectInlineRequest putRequest = new S3PutObjectInlineRequest(); - putRequest.setBucketName(request.getDestinationBucketName()) ; - putRequest.setKey(destinationKeyName); - if ( MetadataDirective.COPY == request.getDirective()) - putRequest.setMetaEntries(originalObject.getMetaEntries()); - else putRequest.setMetaEntries(request.getMetaEntries()); - putRequest.setAcl(request.getAcl()); // -> if via a SOAP call - putRequest.setCannedAccess(request.getCannedAccess()); // -> if via a REST call - putRequest.setContentLength(originalObject.getContentLength()); - putRequest.setData(originalObject.getData()); - - S3PutObjectInlineResponse putResp = handleRequest(putRequest); - response.setResultCode( putResp.resultCode ); - response.setResultDescription( putResp.getResultDescription()); - response.setETag( putResp.getETag()); - response.setLastModified( putResp.getLastModified()); - response.setPutVersion( putResp.getVersion()); - return response; - } - - public S3CreateBucketResponse handleRequest(S3CreateBucketRequest request) - { - S3CreateBucketResponse response = new S3CreateBucketResponse(); - String cannedAccessPolicy = request.getCannedAccess(); - String bucketName = request.getBucketName(); - response.setBucketName( bucketName ); - - verifyBucketName( bucketName, false ); - - S3PolicyContext context = new S3PolicyContext( PolicyActions.CreateBucket, bucketName ); - context.setEvalParam( ConditionKeys.Acl, 
cannedAccessPolicy ); - if (PolicyAccess.DENY == verifyPolicy( context )) - throw new PermissionDeniedException( "Access Denied - bucket policy DENY result" ); - - if (PersistContext.acquireNamedLock("bucket.creation", LOCK_ACQUIRING_TIMEOUT_SECONDS)) - { -<<<<<<< HEAD - Tuple shostTuple = null; -======= - OrderedPair shost_storagelocation_pair = null; ->>>>>>> 6472e7b... Now really adding the renamed files! - boolean success = false; - try { - SBucketDao bucketDao = new SBucketDao(); - SAclDao aclDao = new SAclDao(); - - if (bucketDao.getByName(request.getBucketName()) != null) - throw new ObjectAlreadyExistsException("Bucket already exists"); - -<<<<<<< HEAD - shostTuple = allocBucketStorageHost(request.getBucketName(), null); -======= - shost_storagelocation_pair = allocBucketStorageHost(request.getBucketName(), null); ->>>>>>> 6472e7b... Now really adding the renamed files! - - SBucket sbucket = new SBucket(); - sbucket.setName(request.getBucketName()); - sbucket.setCreateTime(DateHelper.currentGMTTime()); - sbucket.setOwnerCanonicalId( UserContext.current().getCanonicalUserId()); -<<<<<<< HEAD - sbucket.setShost(shostTuple.getFirst()); - shostTuple.getFirst().getBuckets().add(sbucket); -======= - sbucket.setShost(shost_storagelocation_pair.getFirst()); - shost_storagelocation_pair.getFirst().getBuckets().add(sbucket); ->>>>>>> 6472e7b... Now really adding the renamed files! 
- bucketDao.save(sbucket); - - S3AccessControlList acl = request.getAcl(); - - if ( null != cannedAccessPolicy ) - setCannedAccessControls( cannedAccessPolicy, "SBucket", sbucket.getId(), sbucket ); - else if (null != acl) - aclDao.save( "SBucket", sbucket.getId(), acl ); - else setSingleAcl( "SBucket", sbucket.getId(), SAcl.PERMISSION_FULL ); - - // explicitly commit the transaction - PersistContext.commitTransaction(); - success = true; - } - finally - { -<<<<<<< HEAD - if(!success && shostTuple != null) { - S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(shostTuple.getFirst()); - bucketAdapter.deleteContainer(shostTuple.getSecond(), request.getBucketName()); -======= - if(!success && shost_storagelocation_pair != null) { - S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(shost_storagelocation_pair.getFirst()); - bucketAdapter.deleteContainer(shost_storagelocation_pair.getSecond(), request.getBucketName()); ->>>>>>> 6472e7b... Now really adding the renamed files! - } - PersistContext.releaseNamedLock("bucket.creation"); - } - - } else { - throw new OutOfServiceException("Unable to acquire synchronization lock"); - } - - return response; - } - -<<<<<<< HEAD -======= - /** - * Return a S3Response which represents the effect of an object being deleted from its bucket. - * Called from S3BucketAction when deleting an object. - */ - ->>>>>>> 6472e7b... Now really adding the renamed files! 
- public S3Response handleRequest( S3DeleteBucketRequest request ) - { - S3Response response = new S3Response(); - SBucketDao bucketDao = new SBucketDao(); - String bucketName = request.getBucketName(); - SBucket sbucket = bucketDao.getByName( bucketName ); - - if ( sbucket != null ) - { - S3PolicyContext context = new S3PolicyContext( PolicyActions.DeleteBucket, bucketName ); -<<<<<<< HEAD - switch( verifyPolicy( context )) { - case ALLOW: - // -> bucket policy can give users permission to delete a bucket while ACLs cannot -======= - switch( verifyPolicy( context )) - { - case ALLOW: - // The bucket policy can give users permission to delete a bucket whereas ACLs cannot ->>>>>>> 6472e7b... Now really adding the renamed files! - break; - - case DENY: - throw new PermissionDeniedException( "Access Denied - bucket policy DENY result" ); - - case DEFAULT_DENY: - default: -<<<<<<< HEAD - // -> does not matter what the ACLs say only the owner can delete a bucket -======= - // Irrespective of what the ACLs say, only the owner can delete a bucket ->>>>>>> 6472e7b... Now really adding the renamed files! - String client = UserContext.current().getCanonicalUserId(); - if (!client.equals( sbucket.getOwnerCanonicalId())) { - throw new PermissionDeniedException( "Access Denied - only the owner can delete a bucket" ); - } - break; - } - - -<<<<<<< HEAD - // -> delete the file - Tuple tupleBucketHost = getBucketStorageHost(sbucket); - S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(tupleBucketHost.getFirst()); - bucketAdapter.deleteContainer(tupleBucketHost.getSecond(), request.getBucketName()); - - // -> cascade-deleting can delete related SObject/SObjectItem objects, but not SAcl, SMeta and policy objects. We - // need to perform deletion of these objects related to bucket manually. 
- // Delete SMeta & SAcl objects: (1)Get all the objects in the bucket, (2)then all the items in each object, (3) then all meta & acl data for each item - Set objectsInBucket = sbucket.getObjectsInBucket(); - Iterator it = objectsInBucket.iterator(); -======= - // Delete the file from its storage location - OrderedPair host_storagelocation_pair = getBucketStorageHost(sbucket); - S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst()); - bucketAdapter.deleteContainer(host_storagelocation_pair.getSecond(), request.getBucketName()); - - // Cascade-deleting can delete related SObject/SObjectItem objects, but not SAcl, SMeta and policy objects. - // To delete SMeta & SAcl objects: - // (1)Get all the objects in the bucket, - // (2)then all the items in each object, - // (3) then all meta & acl data for each item - Set objectsInBucket = sbucket.getObjectsInBucket(); - Iterator it = objectsInBucket.iterator(); ->>>>>>> 6472e7b... Now really adding the renamed files! - while( it.hasNext()) - { - SObject oneObject = (SObject)it.next(); - Set itemsInObject = oneObject.getItems(); -<<<<<<< HEAD - Iterator is = itemsInObject.iterator(); -======= - Iterator is = itemsInObject.iterator(); ->>>>>>> 6472e7b... Now really adding the renamed files! - while( is.hasNext()) - { - SObjectItem oneItem = (SObjectItem)is.next(); - deleteMetaData( oneItem.getId()); - deleteObjectAcls( "SObjectItem", oneItem.getId()); - } - } - -<<<<<<< HEAD - // -> delete all the policy state associated with the bucket -======= - // Delete all the policy state associated with the bucket ->>>>>>> 6472e7b... Now really adding the renamed files! 
- try { - ServiceProvider.getInstance().deleteBucketPolicy( bucketName ); - BucketPolicyDao policyDao = new BucketPolicyDao(); - policyDao.deletePolicy( bucketName ); - } - catch( Exception e ) { - logger.error("When deleting a bucket we must try to delete its policy: ", e); - } - - deleteBucketAcls( sbucket.getId()); - bucketDao.delete( sbucket ); - response.setResultCode(204); - response.setResultDescription("OK"); - } - else - { response.setResultCode(404); - response.setResultDescription("Bucket does not exist"); - } - return response; - } - -<<<<<<< HEAD -======= - /** - * Return a S3ListBucketResponse which represents a list of up to 1000 objects contained ins the bucket. - * Called from S3BucketAction for GETting objects and for GETting object versions. - */ - ->>>>>>> 6472e7b... Now really adding the renamed files! - public S3ListBucketResponse listBucketContents(S3ListBucketRequest request, boolean includeVersions) - { - S3ListBucketResponse response = new S3ListBucketResponse(); - String bucketName = request.getBucketName(); - String prefix = request.getPrefix(); - if (prefix == null) prefix = StringHelper.EMPTY_STRING; - String marker = request.getMarker(); - if (marker == null) marker = StringHelper.EMPTY_STRING; - - String delimiter = request.getDelimiter(); - int maxKeys = request.getMaxKeys(); - if(maxKeys <= 0) maxKeys = 1000; - - SBucketDao bucketDao = new SBucketDao(); - SBucket sbucket = bucketDao.getByName(bucketName); - if (sbucket == null) throw new NoSuchObjectException("Bucket " + bucketName + " does not exist"); - - PolicyActions action = (includeVersions ? 
PolicyActions.ListBucketVersions : PolicyActions.ListBucket); - S3PolicyContext context = new S3PolicyContext( action, bucketName ); - context.setEvalParam( ConditionKeys.MaxKeys, new String( "" + maxKeys )); - context.setEvalParam( ConditionKeys.Prefix, prefix ); - context.setEvalParam( ConditionKeys.Delimiter, delimiter ); - verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_READ ); - - -<<<<<<< HEAD - // when we query, request one more item so that we know how to set isTruncated flag -======= - // Wen execting the query, request one more item so that we know how to set isTruncated flag ->>>>>>> 6472e7b... Now really adding the renamed files! - SObjectDao sobjectDao = new SObjectDao(); - List l = null; - - if ( includeVersions ) - l = sobjectDao.listAllBucketObjects( sbucket, prefix, marker, maxKeys+1 ); - else l = sobjectDao.listBucketObjects( sbucket, prefix, marker, maxKeys+1 ); - - response.setBucketName(bucketName); - response.setMarker(marker); - response.setMaxKeys(maxKeys); - response.setPrefix(prefix); - response.setDelimiter(delimiter); - response.setTruncated(l.size() > maxKeys); - if(l.size() > maxKeys) { - response.setNextMarker(l.get(l.size() - 1).getNameKey()); - } - -<<<<<<< HEAD - // SOAP response does not support versioning -======= - // If needed - SOAP response does not support versioning ->>>>>>> 6472e7b... Now really adding the renamed files! - response.setContents( composeListBucketContentEntries(l, prefix, delimiter, maxKeys, includeVersions, request.getVersionIdMarker())); - response.setCommonPrefixes( composeListBucketPrefixEntries(l, prefix, delimiter, maxKeys)); - return response; - } - - /** -<<<<<<< HEAD - * To check on bucket policies defined we have to (look for and) evaluate the policy on each - * bucket the user owns. - * - * @param request - * @return -======= - * Return a S3ListAllMyBucketResponse which represents a list of all buckets owned by the requester. 
- * Called from S3BucketAction for GETting all buckets. - * To check on bucket policies defined we have to (look for and) evaluate the policy on each - * bucket the user owns. ->>>>>>> 6472e7b... Now really adding the renamed files! - */ - public S3ListAllMyBucketsResponse handleRequest(S3ListAllMyBucketsRequest request) - { - S3ListAllMyBucketsResponse response = new S3ListAllMyBucketsResponse(); - SBucketDao bucketDao = new SBucketDao(); - -<<<<<<< HEAD - // -> "...you can only list buckets for which you are the owner." -======= - // "...you can only list buckets for which you are the owner." ->>>>>>> 6472e7b... Now really adding the renamed files! - List buckets = bucketDao.listBuckets(UserContext.current().getCanonicalUserId()); - S3CanonicalUser owner = new S3CanonicalUser(); - owner.setID(UserContext.current().getCanonicalUserId()); - owner.setDisplayName(""); - response.setOwner(owner); - - if (buckets != null) - { - S3ListAllMyBucketsEntry[] entries = new S3ListAllMyBucketsEntry[buckets.size()]; - int i = 0; - for(SBucket bucket : buckets) - { - String bucketName = bucket.getName(); - S3PolicyContext context = new S3PolicyContext( PolicyActions.ListAllMyBuckets, bucketName ); - verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_PASS ); - - entries[i] = new S3ListAllMyBucketsEntry(); - entries[i].setName(bucketName); - entries[i].setCreationDate(DateHelper.toCalendar(bucket.getCreateTime())); - i++; - } - response.setBuckets(entries); - } - return response; - } - -<<<<<<< HEAD - public S3Response handleRequest(S3SetBucketAccessControlPolicyRequest request) - { -======= - /** - * Return an S3Response representing the result of PUTTING the ACL of a given bucket. - * Called from S3BucketAction to PUT its ACL. - */ - - public S3Response handleRequest(S3SetBucketAccessControlPolicyRequest request) - { ->>>>>>> 6472e7b... Now really adding the renamed files! 
- S3Response response = new S3Response(); - SBucketDao bucketDao = new SBucketDao(); - String bucketName = request.getBucketName(); - SBucket sbucket = bucketDao.getByName(bucketName); - if(sbucket == null) { - response.setResultCode(404); - response.setResultDescription("Bucket does not exist"); - return response; - } -<<<<<<< HEAD - -======= - ->>>>>>> 6472e7b... Now really adding the renamed files! - S3PolicyContext context = new S3PolicyContext( PolicyActions.PutBucketAcl, bucketName ); - verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_WRITE_ACL ); - - SAclDao aclDao = new SAclDao(); - aclDao.save("SBucket", sbucket.getId(), request.getAcl()); - - response.setResultCode(200); - response.setResultDescription("OK"); - return response; - } - -<<<<<<< HEAD -======= - - /** - * Return a S3AccessControlPolicy representing the ACL of a given bucket. - * Called from S3BucketAction to GET its ACL. - */ - ->>>>>>> 6472e7b... Now really adding the renamed files! - public S3AccessControlPolicy handleRequest(S3GetBucketAccessControlPolicyRequest request) - { - S3AccessControlPolicy policy = new S3AccessControlPolicy(); - SBucketDao bucketDao = new SBucketDao(); - String bucketName = request.getBucketName(); - SBucket sbucket = bucketDao.getByName( bucketName ); - if (sbucket == null) - throw new NoSuchObjectException("Bucket " + bucketName + " does not exist"); - - S3CanonicalUser owner = new S3CanonicalUser(); - owner.setID(sbucket.getOwnerCanonicalId()); - owner.setDisplayName(""); - policy.setOwner(owner); - - S3PolicyContext context = new S3PolicyContext( PolicyActions.GetBucketAcl, bucketName ); - verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_READ_ACL ); - - SAclDao aclDao = new SAclDao(); - List grants = aclDao.listGrants("SBucket", sbucket.getId()); - policy.setGrants(S3Grant.toGrants(grants)); - return policy; - } - - /** -<<<<<<< HEAD - * This function should be called if a multipart upload is aborted OR has completed 
successfully and - * the individual parts have to be cleaned up. - * - * @param bucketName - * @param uploadId - * @param verifyPermission - if false then don't check the user's permission to clean up the state - * @return -======= - * This method should be called if a multipart upload is aborted OR has completed successfully and - * the individual parts have to be cleaned up. - * Called from S3ObjectAction when executing at completion or when aborting multipart upload. - * @param bucketName - * @param uploadId - * @param verifyPermission - If false then do not check the user's permission to clean up the state ->>>>>>> 6472e7b... Now really adding the renamed files! - */ - public int freeUploadParts(String bucketName, int uploadId, boolean verifyPermission) - { - // -> we need to look up the final bucket to figure out which mount point to use to save the part in - SBucketDao bucketDao = new SBucketDao(); - SBucket bucket = bucketDao.getByName(bucketName); - if (bucket == null) { - logger.error( "initiateMultipartUpload failed since " + bucketName + " does not exist" ); - return 404; - } - -<<<<<<< HEAD - Tuple tupleBucketHost = getBucketStorageHost(bucket); - S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(tupleBucketHost.getFirst()); - - try { - MultipartLoadDao uploadDao = new MultipartLoadDao(); - Tuple exists = uploadDao.multipartExits( uploadId ); -======= - OrderedPair host_storagelocation_pair = getBucketStorageHost(bucket); - S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst()); - - try { - MultipartLoadDao uploadDao = new MultipartLoadDao(); - OrderedPair exists = uploadDao.multipartExits( uploadId ); ->>>>>>> 6472e7b... Now really adding the renamed files! 
- if (null == exists) { - logger.error( "initiateMultipartUpload failed since multipart upload" + uploadId + " does not exist" ); - return 404; - } - - // -> the multipart initiator or bucket owner can do this action by default - if (verifyPermission) - { - String initiator = uploadDao.getInitiator( uploadId ); - if (null == initiator || !initiator.equals( UserContext.current().getAccessKey())) - { - // -> write permission on a bucket allows a PutObject / DeleteObject action on any object in the bucket - S3PolicyContext context = new S3PolicyContext( PolicyActions.AbortMultipartUpload, bucketName ); - context.setKeyName( exists.getSecond()); - verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE ); - } - } - - // -> first get a list of all the uploaded files and delete one by one - S3MultipartPart[] parts = uploadDao.getParts( uploadId, 10000, 0 ); - for( int i=0; i < parts.length; i++ ) - { -<<<<<<< HEAD - bucketAdapter.deleteObject( tupleBucketHost.getSecond(), ServiceProvider.getInstance().getMultipartDir(), parts[i].getPath()); -======= - bucketAdapter.deleteObject( host_storagelocation_pair.getSecond(), ServiceProvider.getInstance().getMultipartDir(), parts[i].getPath()); ->>>>>>> 6472e7b... Now really adding the renamed files! - } - - uploadDao.deleteUpload( uploadId ); - return 204; - - } - catch( PermissionDeniedException e ) { - logger.error("freeUploadParts failed due to [" + e.getMessage() + "]", e); - throw e; - } - catch (Exception e) { - logger.error("freeUploadParts failed due to [" + e.getMessage() + "]", e); - return 500; - } - } - - /** - * The initiator must have permission to write to the bucket in question in order to initiate - * a multipart upload. Also check to make sure the special folder used to store parts of - * a multipart exists for this bucket. -<<<<<<< HEAD - * - * @param request -======= - * Called from S3ObjectAction during many stages of multipart upload. ->>>>>>> 6472e7b... Now really adding the renamed files! 
- */ - public S3PutObjectInlineResponse initiateMultipartUpload(S3PutObjectInlineRequest request) - { - S3PutObjectInlineResponse response = new S3PutObjectInlineResponse(); - String bucketName = request.getBucketName(); - String nameKey = request.getKey(); - - // -> does the bucket exist and can we write to it? - SBucketDao bucketDao = new SBucketDao(); - SBucket bucket = bucketDao.getByName(bucketName); - if (bucket == null) { - logger.error( "initiateMultipartUpload failed since " + bucketName + " does not exist" ); - response.setResultCode(404); - } - - S3PolicyContext context = new S3PolicyContext( PolicyActions.PutObject, bucketName ); - context.setKeyName( nameKey ); - context.setEvalParam( ConditionKeys.Acl, request.getCannedAccess()); - verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE ); - - createUploadFolder( bucketName ); - - try { - MultipartLoadDao uploadDao = new MultipartLoadDao(); - int uploadId = uploadDao.initiateUpload( UserContext.current().getAccessKey(), bucketName, nameKey, request.getCannedAccess(), request.getMetaEntries()); - response.setUploadId( uploadId ); - response.setResultCode(200); - - } catch( Exception e ) { - logger.error("initiateMultipartUpload exception: ", e); - response.setResultCode(500); - } - - return response; - } - - /** - * Save the object fragment in a special (i.e., hidden) directory inside the same mount point as - * the bucket location that the final object will be stored in. -<<<<<<< HEAD - * -======= - * Called from S3ObjectAction during many stages of multipart upload. ->>>>>>> 6472e7b... Now really adding the renamed files! 
- * @param request - * @param uploadId - * @param partNumber - * @return S3PutObjectInlineResponse - */ - public S3PutObjectInlineResponse saveUploadPart(S3PutObjectInlineRequest request, int uploadId, int partNumber) - { - S3PutObjectInlineResponse response = new S3PutObjectInlineResponse(); - String bucketName = request.getBucketName(); - - // -> we need to look up the final bucket to figure out which mount point to use to save the part in - SBucketDao bucketDao = new SBucketDao(); - SBucket bucket = bucketDao.getByName(bucketName); - if (bucket == null) { - logger.error( "saveUploadedPart failed since " + bucketName + " does not exist" ); - response.setResultCode(404); - } - S3PolicyContext context = new S3PolicyContext( PolicyActions.PutObject, bucketName ); - context.setKeyName( request.getKey()); - verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE ); - -<<<<<<< HEAD - Tuple tupleBucketHost = getBucketStorageHost(bucket); - S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(tupleBucketHost.getFirst()); -======= - OrderedPair host_storagelocation_pair = getBucketStorageHost(bucket); - S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst()); ->>>>>>> 6472e7b... Now really adding the renamed files! - String itemFileName = new String( uploadId + "-" + partNumber ); - InputStream is = null; - - try { - is = request.getDataInputStream(); -<<<<<<< HEAD - String md5Checksum = bucketAdapter.saveObject(is, tupleBucketHost.getSecond(), ServiceProvider.getInstance().getMultipartDir(), itemFileName); -======= - String md5Checksum = bucketAdapter.saveObject(is, host_storagelocation_pair.getSecond(), ServiceProvider.getInstance().getMultipartDir(), itemFileName); ->>>>>>> 6472e7b... Now really adding the renamed files! 
- response.setETag(md5Checksum); - - MultipartLoadDao uploadDao = new MultipartLoadDao(); - uploadDao.savePart( uploadId, partNumber, md5Checksum, itemFileName, (int)request.getContentLength()); - response.setResultCode(200); - - } catch (IOException e) { - logger.error("UploadPart failed due to " + e.getMessage(), e); - response.setResultCode(500); - } catch (OutOfStorageException e) { - logger.error("UploadPart failed due to " + e.getMessage(), e); - response.setResultCode(500); - } catch (Exception e) { - logger.error("UploadPart failed due to " + e.getMessage(), e); - response.setResultCode(500); - } finally { - if(is != null) { - try { - is.close(); - } catch (IOException e) { - logger.error("UploadPart unable to close stream from data handler.", e); - } - } - } - - return response; - } - - /** - * Create the real object represented by all the parts of the multipart upload. -<<<<<<< HEAD - * - * @param httpResp - servelet response handle to return the headers of the response (including version header) - * @param request - normal parameters need to create a new object (e.g., meta data) - * @param parts - list of files that make up the multipart - * @param os - response outputstream, this function can take a long time and we are required to - * keep the connection alive by returning whitespace characters back periodically. - * @return - * @throws IOException - */ - public S3PutObjectInlineResponse concatentateMultipartUploads(HttpServletResponse httpResp, S3PutObjectInlineRequest request, S3MultipartPart[] parts, OutputStream os) throws IOException -======= - * Called from S3ObjectAction at completion of multipart upload. - * @param httpResp - Servlet response handle to return the headers of the response (including version header) - * @param request - Normal parameters needed to create a new object (including metadata) - * @param parts - List of files that make up the multipart - * @param outputStream - Response output stream - * N.B. 
- This method can be long-lasting - * We are required to keep the connection alive by returning whitespace characters back periodically. - */ - - public S3PutObjectInlineResponse concatentateMultipartUploads(HttpServletResponse httpResp, S3PutObjectInlineRequest request, S3MultipartPart[] parts, OutputStream outputStream) throws IOException ->>>>>>> 6472e7b... Now really adding the renamed files! - { - // [A] Set up and initial error checking - S3PutObjectInlineResponse response = new S3PutObjectInlineResponse(); - String bucketName = request.getBucketName(); - String key = request.getKey(); - S3MetaDataEntry[] meta = request.getMetaEntries(); - - SBucketDao bucketDao = new SBucketDao(); - SBucket bucket = bucketDao.getByName(bucketName); - if (bucket == null) { - logger.error( "completeMultipartUpload( failed since " + bucketName + " does not exist" ); - response.setResultCode(404); - } - - // [B] Now we need to create the final re-assembled object - // -> the allocObjectItem checks for the bucket policy PutObject permissions -<<<<<<< HEAD - Tuple tupleObjectItem = allocObjectItem(bucket, key, meta, null, request.getCannedAccess()); - Tuple tupleBucketHost = getBucketStorageHost(bucket); - - S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(tupleBucketHost.getFirst()); - String itemFileName = tupleObjectItem.getSecond().getStoredPath(); -======= - OrderedPair object_objectitem_pair = allocObjectItem(bucket, key, meta, null, request.getCannedAccess()); - OrderedPair host_storagelocation_pair = getBucketStorageHost(bucket); - - S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst()); - String itemFileName = object_objectitem_pair.getSecond().getStoredPath(); ->>>>>>> 6472e7b... Now really adding the renamed files! 
- - // -> Amazon defines that we must return a 200 response immediately to the client, but - // -> we don't know the version header until we hit here - httpResp.setStatus(200); - httpResp.setContentType("text/xml; charset=UTF-8"); -<<<<<<< HEAD - String version = tupleObjectItem.getSecond().getVersion(); -======= - String version = object_objectitem_pair.getSecond().getVersion(); ->>>>>>> 6472e7b... Now really adding the renamed files! - if (null != version) httpResp.addHeader( "x-amz-version-id", version ); - httpResp.flushBuffer(); - - - // [C] Re-assemble the object from its uploaded file parts - try { - // explicit transaction control to avoid holding transaction during long file concatenation process - PersistContext.commitTransaction(); - -<<<<<<< HEAD - Tuple result = bucketAdapter.concatentateObjects( tupleBucketHost.getSecond(), bucket.getName(), itemFileName, ServiceProvider.getInstance().getMultipartDir(), parts, os ); - response.setETag(result.getFirst()); - response.setLastModified(DateHelper.toCalendar( tupleObjectItem.getSecond().getLastModifiedTime())); - - SObjectItemDao itemDao = new SObjectItemDao(); - SObjectItem item = itemDao.get( tupleObjectItem.getSecond().getId()); -======= - OrderedPair result = bucketAdapter. - concatentateObjects - ( host_storagelocation_pair.getSecond(), - bucket.getName(), - itemFileName, - ServiceProvider.getInstance().getMultipartDir(), - parts, - outputStream ); - response.setETag(result.getFirst()); - response.setLastModified(DateHelper.toCalendar( object_objectitem_pair.getSecond().getLastModifiedTime())); - - SObjectItemDao itemDao = new SObjectItemDao(); - SObjectItem item = itemDao.get( object_objectitem_pair.getSecond().getId()); ->>>>>>> 6472e7b... Now really adding the renamed files! 
- item.setMd5(result.getFirst()); - item.setStoredSize(result.getSecond().longValue()); - response.setResultCode(200); - - PersistContext.getSession().save(item); - } - catch (Exception e) { - logger.error("completeMultipartUpload failed due to " + e.getMessage(), e); - } - return response; - } - -<<<<<<< HEAD -======= - /** - * Return a S3PutObjectInlineResponse which represents an object being created into a bucket - * Called from S3ObjectAction when PUTting or POTing an object. - */ - ->>>>>>> 6472e7b... Now really adding the renamed files! - public S3PutObjectInlineResponse handleRequest(S3PutObjectInlineRequest request) - { - S3PutObjectInlineResponse response = new S3PutObjectInlineResponse(); - String bucketName = request.getBucketName(); - String key = request.getKey(); - long contentLength = request.getContentLength(); - S3MetaDataEntry[] meta = request.getMetaEntries(); - S3AccessControlList acl = request.getAcl(); - - SBucketDao bucketDao = new SBucketDao(); - SBucket bucket = bucketDao.getByName(bucketName); - if (bucket == null) throw new NoSuchObjectException("Bucket " + bucketName + " does not exist"); - - -<<<<<<< HEAD - // -> is the caller allowed to write the object? - // -> the allocObjectItem checks for the bucket policy PutObject permissions - Tuple tupleObjectItem = allocObjectItem(bucket, key, meta, acl, request.getCannedAccess()); - Tuple tupleBucketHost = getBucketStorageHost(bucket); - - S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(tupleBucketHost.getFirst()); - String itemFileName = tupleObjectItem.getSecond().getStoredPath(); -======= - // Is the caller allowed to write the object? 
- // The allocObjectItem checks for the bucket policy PutObject permissions - OrderedPair object_objectitem_pair = allocObjectItem(bucket, key, meta, acl, request.getCannedAccess()); - OrderedPair host_storagelocation_pair = getBucketStorageHost(bucket); - - S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst()); - String itemFileName = object_objectitem_pair.getSecond().getStoredPath(); ->>>>>>> 6472e7b... Now really adding the renamed files! - InputStream is = null; - - try { - // explicit transaction control to avoid holding transaction during file-copy process - PersistContext.commitTransaction(); - - is = request.getDataInputStream(); -<<<<<<< HEAD - String md5Checksum = bucketAdapter.saveObject(is, tupleBucketHost.getSecond(), bucket.getName(), itemFileName); - response.setETag(md5Checksum); - response.setLastModified(DateHelper.toCalendar( tupleObjectItem.getSecond().getLastModifiedTime())); - response.setVersion( tupleObjectItem.getSecond().getVersion()); - - SObjectItemDao itemDao = new SObjectItemDao(); - SObjectItem item = itemDao.get( tupleObjectItem.getSecond().getId()); -======= - String md5Checksum = bucketAdapter.saveObject(is, host_storagelocation_pair.getSecond(), bucket.getName(), itemFileName); - response.setETag(md5Checksum); - response.setLastModified(DateHelper.toCalendar( object_objectitem_pair.getSecond().getLastModifiedTime())); - response.setVersion( object_objectitem_pair.getSecond().getVersion()); - - SObjectItemDao itemDao = new SObjectItemDao(); - SObjectItem item = itemDao.get( object_objectitem_pair.getSecond().getId()); ->>>>>>> 6472e7b... Now really adding the renamed files! 
- item.setMd5(md5Checksum); - item.setStoredSize(contentLength); - PersistContext.getSession().save(item); - - } catch (IOException e) { - logger.error("PutObjectInline failed due to " + e.getMessage(), e); - } catch (OutOfStorageException e) { - logger.error("PutObjectInline failed due to " + e.getMessage(), e); - } finally { - if(is != null) { - try { - is.close(); - } catch (IOException e) { - logger.error("PutObjectInline unable to close stream from data handler.", e); - } - } - } - - return response; - } -<<<<<<< HEAD -======= - - /** - * Return a S3PutObjectResponse which represents an object being created into a bucket - * Called from S3RestServlet when processing a DIME request. - */ ->>>>>>> 6472e7b... Now really adding the renamed files! - - public S3PutObjectResponse handleRequest(S3PutObjectRequest request) - { - S3PutObjectResponse response = new S3PutObjectResponse(); - String bucketName = request.getBucketName(); - String key = request.getKey(); - long contentLength = request.getContentLength(); - S3MetaDataEntry[] meta = request.getMetaEntries(); - S3AccessControlList acl = request.getAcl(); - - SBucketDao bucketDao = new SBucketDao(); - SBucket bucket = bucketDao.getByName(bucketName); - if(bucket == null) throw new NoSuchObjectException("Bucket " + bucketName + " does not exist"); - -<<<<<<< HEAD - // -> is the caller allowed to write the object? - // -> the allocObjectItem checks for the bucket policy PutObject permissions - Tuple tupleObjectItem = allocObjectItem(bucket, key, meta, acl, null); - Tuple tupleBucketHost = getBucketStorageHost(bucket); - - S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(tupleBucketHost.getFirst()); - String itemFileName = tupleObjectItem.getSecond().getStoredPath(); -======= - // Is the caller allowed to write the object? 
- // The allocObjectItem checks for the bucket policy PutObject permissions - OrderedPair object_objectitem_pair = allocObjectItem(bucket, key, meta, acl, null); - OrderedPair host_storagelocation_pair = getBucketStorageHost(bucket); - - S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst()); - String itemFileName = object_objectitem_pair.getSecond().getStoredPath(); ->>>>>>> 6472e7b... Now really adding the renamed files! - InputStream is = null; - try { - // explicit transaction control to avoid holding transaction during file-copy process - PersistContext.commitTransaction(); - - is = request.getInputStream(); -<<<<<<< HEAD - String md5Checksum = bucketAdapter.saveObject(is, tupleBucketHost.getSecond(), bucket.getName(), itemFileName); - response.setETag(md5Checksum); - response.setLastModified(DateHelper.toCalendar( tupleObjectItem.getSecond().getLastModifiedTime())); - - SObjectItemDao itemDao = new SObjectItemDao(); - SObjectItem item = itemDao.get( tupleObjectItem.getSecond().getId()); -======= - String md5Checksum = bucketAdapter.saveObject(is, host_storagelocation_pair.getSecond(), bucket.getName(), itemFileName); - response.setETag(md5Checksum); - response.setLastModified(DateHelper.toCalendar( object_objectitem_pair.getSecond().getLastModifiedTime())); - - SObjectItemDao itemDao = new SObjectItemDao(); - SObjectItem item = itemDao.get( object_objectitem_pair.getSecond().getId()); ->>>>>>> 6472e7b... Now really adding the renamed files! - item.setMd5(md5Checksum); - item.setStoredSize(contentLength); - PersistContext.getSession().save(item); - - } catch (OutOfStorageException e) { - logger.error("PutObject failed due to " + e.getMessage(), e); - } finally { - if(is != null) { - try { - is.close(); - } catch (IOException e) { - logger.error("Unable to close stream from data handler.", e); - } - } - } - - return response; - } - - /** - * The ACL of an object is set at the object version level. 
By default, PUT sets the ACL of the latest -<<<<<<< HEAD - * version of an object. To set the ACL of a different version, use the versionId subresource. - * - * @param request - * @return - */ -======= - * version of an object. To set the ACL of a different version, using the versionId subresource. - * Called from S3ObjectAction to PUT an object's ACL. - */ - ->>>>>>> 6472e7b... Now really adding the renamed files! - public S3Response handleRequest(S3SetObjectAccessControlPolicyRequest request) - { - S3PolicyContext context = null; - - // [A] First find the object in the bucket - S3Response response = new S3Response(); - SBucketDao bucketDao = new SBucketDao(); - String bucketName = request.getBucketName(); - SBucket sbucket = bucketDao.getByName( bucketName ); - if(sbucket == null) { - response.setResultCode(404); - response.setResultDescription("Bucket " + bucketName + "does not exist"); - return response; - } - - SObjectDao sobjectDao = new SObjectDao(); - String nameKey = request.getKey(); - SObject sobject = sobjectDao.getByNameKey( sbucket, nameKey ); - if(sobject == null) { - response.setResultCode(404); - response.setResultDescription("Object " + request.getKey() + " in bucket " + bucketName + " does not exist"); - return response; - } - - String deletionMark = sobject.getDeletionMark(); - if (null != deletionMark) { - response.setResultCode(404); - response.setResultDescription("Object " + request.getKey() + " has been deleted (1)"); - return response; - } - - - // [B] Versioning allow the client to ask for a specific version not just the latest - SObjectItem item = null; - int versioningStatus = sbucket.getVersioningStatus(); - String wantVersion = request.getVersion(); - if ( SBucket.VERSIONING_ENABLED == versioningStatus && null != wantVersion) - item = sobject.getVersion( wantVersion ); - else item = sobject.getLatestVersion(( SBucket.VERSIONING_ENABLED != versioningStatus )); - - if (item == null) { - response.setResultCode(404); - 
response.setResultDescription("Object " + request.getKey() + " has been deleted (2)"); - return response; - } - - if ( SBucket.VERSIONING_ENABLED == versioningStatus ) { - context = new S3PolicyContext( PolicyActions.PutObjectAclVersion, bucketName ); - context.setEvalParam( ConditionKeys.VersionId, wantVersion ); - response.setVersion( item.getVersion()); - } - else context = new S3PolicyContext( PolicyActions.PutObjectAcl, bucketName ); - context.setKeyName( nameKey ); - verifyAccess( context, "SObjectItem", item.getId(), SAcl.PERMISSION_WRITE_ACL ); - - // -> the acl always goes on the instance of the object - SAclDao aclDao = new SAclDao(); - aclDao.save("SObjectItem", item.getId(), request.getAcl()); - - response.setResultCode(200); - response.setResultDescription("OK"); - return response; - } - - /** - * By default, GET returns ACL information about the latest version of an object. To return ACL - * information about a different version, use the versionId subresource -<<<<<<< HEAD - * - * @param request - * @return - */ -======= - * Called from S3ObjectAction to get an object's ACL. - */ - ->>>>>>> 6472e7b... Now really adding the renamed files! - public S3AccessControlPolicy handleRequest(S3GetObjectAccessControlPolicyRequest request) - { - S3PolicyContext context = null; - - // [A] Does the object exist that holds the ACL we are looking for? 
- S3AccessControlPolicy policy = new S3AccessControlPolicy(); - SBucketDao bucketDao = new SBucketDao(); - String bucketName = request.getBucketName(); - SBucket sbucket = bucketDao.getByName( bucketName ); - if (sbucket == null) - throw new NoSuchObjectException("Bucket " + bucketName + " does not exist"); - - SObjectDao sobjectDao = new SObjectDao(); - String nameKey = request.getKey(); - SObject sobject = sobjectDao.getByNameKey( sbucket, nameKey ); - if (sobject == null) - throw new NoSuchObjectException("Object " + request.getKey() + " does not exist"); - - String deletionMark = sobject.getDeletionMark(); - if (null != deletionMark) { - policy.setResultCode(404); - policy.setResultDescription("Object " + request.getKey() + " has been deleted (1)"); - return policy; - } - - - // [B] Versioning allow the client to ask for a specific version not just the latest - SObjectItem item = null; - int versioningStatus = sbucket.getVersioningStatus(); - String wantVersion = request.getVersion(); - if ( SBucket.VERSIONING_ENABLED == versioningStatus && null != wantVersion) - item = sobject.getVersion( wantVersion ); - else item = sobject.getLatestVersion(( SBucket.VERSIONING_ENABLED != versioningStatus )); - - if (item == null) { - policy.setResultCode(404); - policy.setResultDescription("Object " + request.getKey() + " has been deleted (2)"); - return policy; - } - - if ( SBucket.VERSIONING_ENABLED == versioningStatus ) { - context = new S3PolicyContext( PolicyActions.GetObjectVersionAcl, bucketName ); - context.setEvalParam( ConditionKeys.VersionId, wantVersion ); - policy.setVersion( item.getVersion()); - } - else context = new S3PolicyContext( PolicyActions.GetObjectAcl, bucketName ); - context.setKeyName( nameKey ); - verifyAccess( context, "SObjectItem", item.getId(), SAcl.PERMISSION_READ_ACL ); - - - // [C] ACLs are ALWAYS on an instance of the object - S3CanonicalUser owner = new S3CanonicalUser(); - owner.setID(sobject.getOwnerCanonicalId()); - 
owner.setDisplayName(""); - policy.setOwner(owner); - policy.setResultCode(200); - - SAclDao aclDao = new SAclDao(); - List grants = aclDao.listGrants( "SObjectItem", item.getId()); - policy.setGrants(S3Grant.toGrants(grants)); - return policy; - } - - /** -<<<<<<< HEAD - * Implements both GetObject and GetObjectExtended. - * - * @param request - * @return - */ -======= - * Handle requests for GET object and HEAD "get object extended" - * Called from S3ObjectAction for GET and HEAD of an object. - */ - ->>>>>>> 6472e7b... Now really adding the renamed files! - public S3GetObjectResponse handleRequest(S3GetObjectRequest request) - { - S3GetObjectResponse response = new S3GetObjectResponse(); - S3PolicyContext context = null; - boolean ifRange = false; - long bytesStart = request.getByteRangeStart(); - long bytesEnd = request.getByteRangeEnd(); - int resultCode = 200; - - // [A] Verify that the bucket and the object exist - SBucketDao bucketDao = new SBucketDao(); - String bucketName = request.getBucketName(); - SBucket sbucket = bucketDao.getByName(bucketName); - if (sbucket == null) { - response.setResultCode(404); - response.setResultDescription("Bucket " + request.getBucketName() + " does not exist"); - return response; - } - - SObjectDao objectDao = new SObjectDao(); - String nameKey = request.getKey(); - SObject sobject = objectDao.getByNameKey( sbucket, nameKey ); - if (sobject == null) { - response.setResultCode(404); - response.setResultDescription("Object " + request.getKey() + " does not exist in bucket " + request.getBucketName()); - return response; - } - - String deletionMark = sobject.getDeletionMark(); - if (null != deletionMark) { - response.setDeleteMarker( deletionMark ); - response.setResultCode(404); - response.setResultDescription("Object " + request.getKey() + " has been deleted (1)"); - return response; - } - - - // [B] Versioning allow the client to ask for a specific version not just the latest - SObjectItem item = null; - int 
versioningStatus = sbucket.getVersioningStatus(); - String wantVersion = request.getVersion(); - if ( SBucket.VERSIONING_ENABLED == versioningStatus && null != wantVersion) - item = sobject.getVersion( wantVersion ); - else item = sobject.getLatestVersion(( SBucket.VERSIONING_ENABLED != versioningStatus )); - - if (item == null) { - response.setResultCode(404); - response.setResultDescription("Object " + request.getKey() + " has been deleted (2)"); - return response; - } - - if ( SBucket.VERSIONING_ENABLED == versioningStatus ) { - context = new S3PolicyContext( PolicyActions.GetObjectVersion, bucketName ); - context.setEvalParam( ConditionKeys.VersionId, wantVersion ); - } - else context = new S3PolicyContext( PolicyActions.GetObject, bucketName ); - context.setKeyName( nameKey ); - verifyAccess( context, "SObjectItem", item.getId(), SAcl.PERMISSION_READ ); - - - // [C] Handle all the IFModifiedSince ... conditions, and access privileges - // -> http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.27 (HTTP If-Range header) - if (request.isReturnCompleteObjectOnConditionFailure() && (0 <= bytesStart && 0 <= bytesEnd)) ifRange = true; - - resultCode = conditionPassed( request.getConditions(), item.getLastModifiedTime(), item.getMd5(), ifRange ); - if ( -1 == resultCode ) { - // -> If-Range implementation, we have to return the entire object - resultCode = 200; - bytesStart = -1; - bytesEnd = -1; - } - else if (200 != resultCode) { - response.setResultCode( resultCode ); - response.setResultDescription( "Precondition Failed" ); - return response; - } - - - // [D] Return the contents of the object inline - // -> extract the meta data that corresponds the specific versioned item - SMetaDao metaDao = new SMetaDao(); - List itemMetaData = metaDao.getByTarget( "SObjectItem", item.getId()); - if (null != itemMetaData) - { - int i = 0; - S3MetaDataEntry[] metaEntries = new S3MetaDataEntry[ itemMetaData.size() ]; -<<<<<<< HEAD - ListIterator it = 
itemMetaData.listIterator(); -======= - ListIterator it = itemMetaData.listIterator(); ->>>>>>> 6472e7b... Now really adding the renamed files! - while( it.hasNext()) { - SMeta oneTag = (SMeta)it.next(); - S3MetaDataEntry oneEntry = new S3MetaDataEntry(); - oneEntry.setName( oneTag.getName()); - oneEntry.setValue( oneTag.getValue()); - metaEntries[i++] = oneEntry; - } - response.setMetaEntries( metaEntries ); - } - - // -> support a single byte range - if ( 0 <= bytesStart && 0 <= bytesEnd ) { - response.setContentLength( bytesEnd - bytesStart ); - resultCode = 206; - } - else response.setContentLength( item.getStoredSize()); - - if(request.isReturnData()) - { - response.setETag(item.getMd5()); - response.setLastModified(DateHelper.toCalendar( item.getLastModifiedTime())); - response.setVersion( item.getVersion()); - if (request.isInlineData()) - { -<<<<<<< HEAD - Tuple tupleSHostInfo = getBucketStorageHost(sbucket); -======= - OrderedPair tupleSHostInfo = getBucketStorageHost(sbucket); ->>>>>>> 6472e7b... Now really adding the renamed files! - S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(tupleSHostInfo.getFirst()); - - if ( 0 <= bytesStart && 0 <= bytesEnd ) - response.setData(bucketAdapter.loadObjectRange(tupleSHostInfo.getSecond(), - request.getBucketName(), item.getStoredPath(), bytesStart, bytesEnd )); - else response.setData(bucketAdapter.loadObject(tupleSHostInfo.getSecond(), request.getBucketName(), item.getStoredPath())); - } - } - - response.setResultCode( resultCode ); - response.setResultDescription("OK"); - return response; - } - - /** -<<<<<<< HEAD - * In one place we handle both versioning and non-versioning delete requests. - */ - public S3Response handleRequest(S3DeleteObjectRequest request) - { - // -> verify that the bucket and object exist -======= - * Handle object deletion requests, both versioning and non-versioning requirements. - * Called from S3ObjectAction for deletion. 
- */ - public S3Response handleRequest(S3DeleteObjectRequest request) - { - // Verify that the bucket and object exist ->>>>>>> 6472e7b... Now really adding the renamed files! - S3Response response = new S3Response(); - SBucketDao bucketDao = new SBucketDao(); - String bucketName = request.getBucketName(); - SBucket sbucket = bucketDao.getByName( bucketName ); - if (sbucket == null) { - response.setResultCode(404); -<<<<<<< HEAD - response.setResultDescription("Bucket does not exist"); -======= - response.setResultDescription("Bucket " + bucketName + " does not exist"); ->>>>>>> 6472e7b... Now really adding the renamed files! - return response; - } - - SObjectDao objectDao = new SObjectDao(); - String nameKey = request.getKey(); - SObject sobject = objectDao.getByNameKey( sbucket, nameKey ); - if (sobject == null) { - response.setResultCode(404); -<<<<<<< HEAD - response.setResultDescription("Bucket does not exist"); -======= - response.setResultDescription("No object with key " + nameKey + " exists in bucket " + bucketName); ->>>>>>> 6472e7b... Now really adding the renamed files! - return response; - } - - -<<<<<<< HEAD - // -> versioning controls what delete means -======= - // Discover whether versioning is enabled. If so versioning requires the setting of a deletion marker. ->>>>>>> 6472e7b... Now really adding the renamed files! 
- String storedPath = null; - SObjectItem item = null; - int versioningStatus = sbucket.getVersioningStatus(); - if ( SBucket.VERSIONING_ENABLED == versioningStatus ) - { - String wantVersion = request.getVersion(); - S3PolicyContext context = new S3PolicyContext( PolicyActions.DeleteObjectVersion, bucketName ); - context.setKeyName( nameKey ); - context.setEvalParam( ConditionKeys.VersionId, wantVersion ); - verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_WRITE ); - - if (null == wantVersion) { -<<<<<<< HEAD - // -> if versioning is on and no versionId is given then we just write a deletion marker -======= - // If versioning is on and no versionId is given then we just write a deletion marker ->>>>>>> 6472e7b... Now really adding the renamed files! - sobject.setDeletionMark( UUID.randomUUID().toString()); - objectDao.update( sobject ); - } - else { -<<<<<<< HEAD - // -> are we removing the delete marker? -======= - // Otherwise remove the deletion marker if this has been set ->>>>>>> 6472e7b... Now really adding the renamed files! - String deletionMarker = sobject.getDeletionMark(); - if (null != deletionMarker && wantVersion.equalsIgnoreCase( deletionMarker )) { - sobject.setDeletionMark( null ); - objectDao.update( sobject ); - response.setResultCode(204); - return response; - } - -<<<<<<< HEAD - // -> if versioning is on and the versionId is given then we delete the object matching that version -======= - // If versioning is on and the versionId is given (non-null) then delete the object matching that version ->>>>>>> 6472e7b... Now really adding the renamed files! - if ( null == (item = sobject.getVersion( wantVersion ))) { - response.setResultCode(404); - return response; - } - else { -<<<<<<< HEAD - // -> just delete the one item that matches the versionId from the database -======= - // Providing versionId is non-null, then just delete the one item that matches the versionId from the database ->>>>>>> 6472e7b... 
Now really adding the renamed files! - storedPath = item.getStoredPath(); - sobject.deleteItem( item.getId()); - objectDao.update( sobject ); - } - } - } - else -<<<<<<< HEAD - { // -> if versioning is off then we do delete the null object -======= - { // If versioning is off then we do delete the null object ->>>>>>> 6472e7b... Now really adding the renamed files! - S3PolicyContext context = new S3PolicyContext( PolicyActions.DeleteObject, bucketName ); - context.setKeyName( nameKey ); - verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_WRITE ); - - if ( null == (item = sobject.getLatestVersion( true ))) { - response.setResultCode(404); - return response; - } - else { -<<<<<<< HEAD - // -> if no item with a null version then we are done - if (null == item.getVersion()) { - // -> remove the entire object - // -> cascade-deleting can delete related SObject/SObjectItem objects, but not SAcl and SMeta objects. -======= - // If there is no item with a null version then we are done - if (null == item.getVersion()) { - // Otherwiswe remove the entire object - // Cascade-deleting can delete related SObject/SObjectItem objects, but not SAcl and SMeta objects. ->>>>>>> 6472e7b... Now really adding the renamed files! 
- storedPath = item.getStoredPath(); - deleteMetaData( item.getId()); - deleteObjectAcls( "SObjectItem", item.getId()); - objectDao.delete( sobject ); - } - } - } - -<<<<<<< HEAD - // -> delete the file holding the object - if (null != storedPath) - { - Tuple tupleBucketHost = getBucketStorageHost( sbucket ); - S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter( tupleBucketHost.getFirst()); - bucketAdapter.deleteObject( tupleBucketHost.getSecond(), bucketName, storedPath ); -======= - // Delete the file holding the object - if (null != storedPath) - { - OrderedPair host_storagelocation_pair = getBucketStorageHost( sbucket ); - S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter( host_storagelocation_pair.getFirst()); - bucketAdapter.deleteObject( host_storagelocation_pair.getSecond(), bucketName, storedPath ); ->>>>>>> 6472e7b... Now really adding the renamed files! - } - - response.setResultCode(204); - return response; - } - - - private void deleteMetaData( long itemId ) { - SMetaDao metaDao = new SMetaDao(); - List itemMetaData = metaDao.getByTarget( "SObjectItem", itemId ); - if (null != itemMetaData) - { -<<<<<<< HEAD - ListIterator it = itemMetaData.listIterator(); -======= - ListIterator it = itemMetaData.listIterator(); ->>>>>>> 6472e7b... Now really adding the renamed files! - while( it.hasNext()) { - SMeta oneTag = (SMeta)it.next(); - metaDao.delete( oneTag ); - } - } - } - - private void deleteObjectAcls( String target, long itemId ) { - SAclDao aclDao = new SAclDao(); - List itemAclData = aclDao.listGrants( target, itemId ); - if (null != itemAclData) - { -<<<<<<< HEAD - ListIterator it = itemAclData.listIterator(); -======= - ListIterator it = itemAclData.listIterator(); ->>>>>>> 6472e7b... Now really adding the renamed files! 
- while( it.hasNext()) { - SAcl oneTag = (SAcl)it.next(); - aclDao.delete( oneTag ); - } - } - } - - private void deleteBucketAcls( long bucketId ) { - SAclDao aclDao = new SAclDao(); - List bucketAclData = aclDao.listGrants( "SBucket", bucketId ); - if (null != bucketAclData) - { -<<<<<<< HEAD - ListIterator it = bucketAclData.listIterator(); -======= - ListIterator it = bucketAclData.listIterator(); ->>>>>>> 6472e7b... Now really adding the renamed files! - while( it.hasNext()) { - SAcl oneTag = (SAcl)it.next(); - aclDao.delete( oneTag ); - } - } - } - - private S3ListBucketPrefixEntry[] composeListBucketPrefixEntries(List l, String prefix, String delimiter, int maxKeys) - { - List entries = new ArrayList(); - int count = 0; - - for(SObject sobject : l) - { - if(delimiter != null && !delimiter.isEmpty()) - { - String subName = StringHelper.substringInBetween(sobject.getNameKey(), prefix, delimiter); - if(subName != null) - { - S3ListBucketPrefixEntry entry = new S3ListBucketPrefixEntry(); - if ( prefix != null && prefix.length() > 0) - entry.setPrefix(prefix + delimiter + subName); - else entry.setPrefix(subName); - } - } - count++; - if(count >= maxKeys) break; - } - - if(entries.size() > 0) return entries.toArray(new S3ListBucketPrefixEntry[0]); - return null; - } - - /** - * The 'versionIdMarker' parameter only makes sense if enableVersion is true. - * versionIdMarker is the starting point to return information back. So for example if an - * object has versions 1,2,3,4,5 and the versionIdMarker is '3', then 3,4,5 will be returned - * by this function. If the versionIdMarker is null then all versions are returned. - * - * TODO - how does the versionIdMarker work when there is a deletion marker in the object? 
- */ - private S3ListBucketObjectEntry[] composeListBucketContentEntries(List l, String prefix, String delimiter, int maxKeys, boolean enableVersion, String versionIdMarker) - { - List entries = new ArrayList(); - SObjectItem latest = null; - boolean hitIdMarker = false; - int count = 0; - - for( SObject sobject : l ) - { - if (delimiter != null && !delimiter.isEmpty()) - { - if (StringHelper.substringInBetween(sobject.getNameKey(), prefix, delimiter) != null) - continue; - } - - if (enableVersion) - { - hitIdMarker = (null == versionIdMarker ? true : false); - -<<<<<<< HEAD - // -> this supports the REST call GET /?versions - String deletionMarker = sobject.getDeletionMark(); - if ( null != deletionMarker ) - { - // -> TODO we don't save the timestamp when something is deleted -======= - // This supports GET REST calls with /?versions - String deletionMarker = sobject.getDeletionMark(); - if ( null != deletionMarker ) - { - // TODO we should also save the timestamp when something is deleted ->>>>>>> 6472e7b... Now really adding the renamed files! 
- S3ListBucketObjectEntry entry = new S3ListBucketObjectEntry(); - entry.setKey(sobject.getNameKey()); - entry.setVersion( deletionMarker ); - entry.setIsLatest( true ); - entry.setIsDeletionMarker( true ); - entry.setLastModified( Calendar.getInstance( TimeZone.getTimeZone("GMT") )); - entry.setOwnerCanonicalId(sobject.getOwnerCanonicalId()); - entry.setOwnerDisplayName(""); - entries.add( entry ); - latest = null; - } - else latest = sobject.getLatestVersion( false ); - - Iterator it = sobject.getItems().iterator(); - while( it.hasNext()) - { - SObjectItem item = (SObjectItem)it.next(); - - if ( !hitIdMarker ) - { - if (item.getVersion().equalsIgnoreCase( versionIdMarker )) { - hitIdMarker = true; - entries.add( toListEntry( sobject, item, latest )); - } - } - else entries.add( toListEntry( sobject, item, latest )); - } - } - else - { // -> if there are multiple versions of an object then just return its last version - Iterator it = sobject.getItems().iterator(); - SObjectItem lastestItem = null; - int maxVersion = 0; - int version = 0; - while(it.hasNext()) - { - SObjectItem item = (SObjectItem)it.next(); - String versionStr = item.getVersion(); - - if ( null != versionStr ) - version = Integer.parseInt(item.getVersion()); - else lastestItem = item; - - // -> if the bucket has versions turned on - if (version > maxVersion) { - maxVersion = version; - lastestItem = item; - } - } - if (lastestItem != null) { - entries.add( toListEntry( sobject, lastestItem, null )); - } - } - - count++; - if(count >= maxKeys) break; - } - - if ( entries.size() > 0 ) - return entries.toArray(new S3ListBucketObjectEntry[0]); - else return null; - } - - private static S3ListBucketObjectEntry toListEntry( SObject sobject, SObjectItem item, SObjectItem latest ) - { - S3ListBucketObjectEntry entry = new S3ListBucketObjectEntry(); - entry.setKey(sobject.getNameKey()); - entry.setVersion( item.getVersion()); - entry.setETag( "\"" + item.getMd5() + "\"" ); - 
entry.setSize(item.getStoredSize()); - entry.setStorageClass( "STANDARD" ); - entry.setLastModified(DateHelper.toCalendar(item.getLastModifiedTime())); - entry.setOwnerCanonicalId(sobject.getOwnerCanonicalId()); - entry.setOwnerDisplayName(""); - - if (null != latest && item == latest) entry.setIsLatest( true ); - return entry; - } - -<<<<<<< HEAD - public Tuple getBucketStorageHost(SBucket bucket) -======= - private OrderedPair getBucketStorageHost(SBucket bucket) ->>>>>>> 6472e7b... Now really adding the renamed files! - { - MHostMountDao mountDao = new MHostMountDao(); - - SHost shost = bucket.getShost(); - if(shost.getHostType() == SHost.STORAGE_HOST_TYPE_LOCAL) { -<<<<<<< HEAD - return new Tuple(shost, shost.getExportRoot()); -======= - return new OrderedPair(shost, shost.getExportRoot()); ->>>>>>> 6472e7b... Now really adding the renamed files! - } - - MHostMount mount = mountDao.getHostMount(ServiceProvider.getInstance().getManagementHostId(), shost.getId()); - if(mount != null) { -<<<<<<< HEAD - return new Tuple(shost, mount.getMountPath()); -======= - return new OrderedPair(shost, mount.getMountPath()); ->>>>>>> 6472e7b... Now really adding the renamed files! - } - - // need to redirect request to other node - throw new HostNotMountedException("Storage host " + shost.getHost() + " is not locally mounted"); - } - - /** - * Locate the folder to hold upload parts at the same mount point as the upload's final bucket - * location. Create the upload folder dynamically. - * - * @param bucketName - */ - private void createUploadFolder(String bucketName) - { - if (PersistContext.acquireNamedLock("bucket.creation", LOCK_ACQUIRING_TIMEOUT_SECONDS)) - { - try { - allocBucketStorageHost(bucketName, ServiceProvider.getInstance().getMultipartDir()); - } - finally { - PersistContext.releaseNamedLock("bucket.creation"); - } - } - } - - /** - * The overrideName is used to create a hidden storage bucket (folder) in the same location - * as the given bucketName. 
This can be used to create a folder for parts of a multipart - * upload for the associated bucket. - * - * @param bucketName - * @param overrideName - * @return - */ -<<<<<<< HEAD - private Tuple allocBucketStorageHost(String bucketName, String overrideName) -======= - private OrderedPair allocBucketStorageHost(String bucketName, String overrideName) ->>>>>>> 6472e7b... Now really adding the renamed files! - { - MHostDao mhostDao = new MHostDao(); - SHostDao shostDao = new SHostDao(); - - MHost mhost = mhostDao.get(ServiceProvider.getInstance().getManagementHostId()); - if(mhost == null) - throw new OutOfServiceException("Temporarily out of service"); - - if(mhost.getMounts().size() > 0) { - Random random = new Random(); - MHostMount[] mounts = (MHostMount[])mhost.getMounts().toArray(); - MHostMount mount = mounts[random.nextInt(mounts.length)]; - S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(mount.getShost()); - bucketAdapter.createContainer(mount.getMountPath(), (null != overrideName ? overrideName : bucketName)); -<<<<<<< HEAD - return new Tuple(mount.getShost(), mount.getMountPath()); - } - - // To make things simple, only allow one local mounted storage root -======= - return new OrderedPair(mount.getShost(), mount.getMountPath()); - } - - // To make things simple, only allow one local mounted storage root TODO - Change in the future ->>>>>>> 6472e7b... Now really adding the renamed files! - String localStorageRoot = ServiceProvider.getInstance().getStartupProperties().getProperty("storage.root"); - if(localStorageRoot != null) { - SHost localSHost = shostDao.getLocalStorageHost(mhost.getId(), localStorageRoot); - if(localSHost == null) - throw new InternalErrorException("storage.root is configured but not initialized"); - - S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(localSHost); - bucketAdapter.createContainer(localSHost.getExportRoot(),(null != overrideName ? 
overrideName : bucketName)); -<<<<<<< HEAD - return new Tuple(localSHost, localStorageRoot); -======= - return new OrderedPair(localSHost, localStorageRoot); ->>>>>>> 6472e7b... Now really adding the renamed files! - } - - throw new OutOfStorageException("No storage host is available"); - } - - public S3BucketAdapter getStorageHostBucketAdapter(SHost shost) - { - S3BucketAdapter adapter = bucketAdapters.get(shost.getHostType()); - if(adapter == null) - throw new InternalErrorException("Bucket adapter is not installed for host type: " + shost.getHostType()); - - return adapter; - } - - /** - * If acl is set then the cannedAccessPolicy parameter should be null and is ignored. - * The cannedAccessPolicy parameter is for REST Put requests only where a simple set of ACLs can be - * created with a single header value. Note that we do not currently support "anonymous" un-authenticated - * access in our implementation. - * - * @throws IOException - */ - @SuppressWarnings("deprecation") -<<<<<<< HEAD - public Tuple allocObjectItem(SBucket bucket, String nameKey, S3MetaDataEntry[] meta, S3AccessControlList acl, String cannedAccessPolicy) -======= - public OrderedPair allocObjectItem(SBucket bucket, String nameKey, S3MetaDataEntry[] meta, S3AccessControlList acl, String cannedAccessPolicy) ->>>>>>> 6472e7b... Now really adding the renamed files! 
- { - SObjectDao objectDao = new SObjectDao(); - SObjectItemDao objectItemDao = new SObjectItemDao(); - SMetaDao metaDao = new SMetaDao(); - SAclDao aclDao = new SAclDao(); - SObjectItem item = null; - int versionSeq = 1; - int versioningStatus = bucket.getVersioningStatus(); - - Session session = PersistContext.getSession(); - - // [A] To write into a bucket the user must have write permission to that bucket - S3PolicyContext context = new S3PolicyContext( PolicyActions.PutObject, bucket.getName()); - context.setKeyName( nameKey ); - context.setEvalParam( ConditionKeys.Acl, cannedAccessPolicy); -<<<<<<< HEAD - verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE ); - - // [A] If versioning is off them we over write a null object item -======= - - verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE ); // TODO - check this validates plain POSTs - - // [B] If versioning is off them we over write a null object item ->>>>>>> 6472e7b... Now really adding the renamed files! 
- SObject object = objectDao.getByNameKey(bucket, nameKey); - if ( object != null ) - { - // -> if versioning is on create new object items - if ( SBucket.VERSIONING_ENABLED == versioningStatus ) - { - session.lock(object, LockMode.UPGRADE); - versionSeq = object.getNextSequence(); - object.setNextSequence(versionSeq + 1); - session.save(object); - - item = new SObjectItem(); - item.setTheObject(object); - object.getItems().add(item); - item.setVersion(String.valueOf(versionSeq)); - Date ts = DateHelper.currentGMTTime(); - item.setCreateTime(ts); - item.setLastAccessTime(ts); - item.setLastModifiedTime(ts); - session.save(item); - } - else - { // -> find an object item with a null version, can be null - // if bucket started out with versioning enabled and was then suspended - item = objectItemDao.getByObjectIdNullVersion( object.getId()); - if (item == null) - { - item = new SObjectItem(); - item.setTheObject(object); - object.getItems().add(item); - Date ts = DateHelper.currentGMTTime(); - item.setCreateTime(ts); - item.setLastAccessTime(ts); - item.setLastModifiedTime(ts); - session.save(item); - } - } - } - else - { // -> there is no object nor an object item - object = new SObject(); - object.setBucket(bucket); - object.setNameKey(nameKey); - object.setNextSequence(2); - object.setCreateTime(DateHelper.currentGMTTime()); - object.setOwnerCanonicalId(UserContext.current().getCanonicalUserId()); - session.save(object); - - item = new SObjectItem(); - item.setTheObject(object); - object.getItems().add(item); - if (SBucket.VERSIONING_ENABLED == versioningStatus) item.setVersion(String.valueOf(versionSeq)); - Date ts = DateHelper.currentGMTTime(); - item.setCreateTime(ts); - item.setLastAccessTime(ts); - item.setLastModifiedTime(ts); - session.save(item); - } - - - // [C] We will use the item DB id as the file name, MD5/contentLength will be stored later - String suffix = null; - int dotPos = nameKey.lastIndexOf('.'); - if (dotPos >= 0) suffix = 
nameKey.substring(dotPos); - if ( suffix != null ) - item.setStoredPath(String.valueOf(item.getId()) + suffix); - else item.setStoredPath(String.valueOf(item.getId())); - - metaDao.save("SObjectItem", item.getId(), meta); - - - // [D] Are we setting an ACL along with the object - // -> the ACL is ALWAYS set on a particular instance of the object (i.e., a version) - if ( null != cannedAccessPolicy ) - { - setCannedAccessControls( cannedAccessPolicy, "SObjectItem", item.getId(), bucket ); - } - else if (null == acl || 0 == acl.size()) - { - // -> this is termed the "private" or default ACL, "Owner gets FULL_CONTROL" - setSingleAcl( "SObjectItem", item.getId(), SAcl.PERMISSION_FULL ); - } - else if (null != acl) { - aclDao.save( "SObjectItem", item.getId(), acl ); - } - - session.update(item); -<<<<<<< HEAD - return new Tuple(object, item); -======= - return new OrderedPair(object, item); ->>>>>>> 6472e7b... Now really adding the renamed files! - } - - - /** - * Access controls that are specified via the "x-amz-acl:" headers in REST requests. - * Note that canned policies can be set when the object's contents are set - */ -<<<<<<< HEAD - private void setCannedAccessControls( String cannedAccessPolicy, String target, long objectId, SBucket bucket ) - { - if ( cannedAccessPolicy.equalsIgnoreCase( "public-read" )) - { - // -> owner gets FULL_CONTROL and the anonymous principal (the 'A' symbol here) is granted READ access. 
- setDefaultAcls( target, objectId, SAcl.PERMISSION_FULL, SAcl.PERMISSION_READ, "A" ); - } - else if (cannedAccessPolicy.equalsIgnoreCase( "public-read-write" )) - { - // -> owner gets FULL_CONTROL and the anonymous principal (the 'A' symbol here) is granted READ and WRITE access - setDefaultAcls( target, objectId, SAcl.PERMISSION_FULL, (SAcl.PERMISSION_READ | SAcl.PERMISSION_WRITE), "A" ); - } - else if (cannedAccessPolicy.equalsIgnoreCase( "authenticated-read" )) - { - // -> Owner gets FULL_CONTROL and ANY principal authenticated as a registered S3 user (the '*' symbol here) is granted READ access - setDefaultAcls( target, objectId, SAcl.PERMISSION_FULL, SAcl.PERMISSION_READ, "*" ); - } - else if (cannedAccessPolicy.equalsIgnoreCase( "private" )) - { - // -> this is termed the "private" or default ACL, "Owner gets FULL_CONTROL" - setSingleAcl( target, objectId, SAcl.PERMISSION_FULL ); - } - else if (cannedAccessPolicy.equalsIgnoreCase( "bucket-owner-read" )) - { - // -> Object Owner gets FULL_CONTROL, Bucket Owner gets READ - // -> is equivalent to private when used with PUT Bucket - if ( target.equalsIgnoreCase( "SBucket" )) - setSingleAcl( target, objectId, SAcl.PERMISSION_FULL ); - else setDefaultAcls( target, objectId, SAcl.PERMISSION_FULL, SAcl.PERMISSION_READ, bucket.getOwnerCanonicalId()); - } - else if (cannedAccessPolicy.equalsIgnoreCase( "bucket-owner-full-control" )) - { - // -> Object Owner gets FULL_CONTROL, Bucket Owner gets FULL_CONTROL - // -> is equivalent to private when used with PUT Bucket - if ( target.equalsIgnoreCase( "SBucket" )) - setSingleAcl( target, objectId, SAcl.PERMISSION_FULL ); - else setDefaultAcls( target, objectId, SAcl.PERMISSION_FULL, SAcl.PERMISSION_FULL, bucket.getOwnerCanonicalId()); - } - else throw new UnsupportedException( "Unknown Canned Access Policy: " + cannedAccessPolicy + " is not supported" ); -======= - public void setCannedAccessControls( String cannedAccessPolicy, String target, long objectId, SBucket bucket ) 
- { - // Find the permission and symbol for the principal corresponding to the requested cannedAccessPolicy - Triple permission_permission_symbol_triple = - SAcl.getCannedAccessControls(cannedAccessPolicy, target, bucket.getOwnerCanonicalId()); - if ( null == permission_permission_symbol_triple.getThird() ) - setSingleAcl(target, objectId, permission_permission_symbol_triple.getFirst()); - else - { setDefaultAcls( target, - objectId, - permission_permission_symbol_triple.getFirst(), // permission according to ownership of object - permission_permission_symbol_triple.getSecond(), // permission according to ownership of bucket - permission_permission_symbol_triple.getThird() ); // "symbol" to indicate principal or otherwise name of owner - - } ->>>>>>> 6472e7b... Now really adding the renamed files! - } - - - private void setSingleAcl( String target, long targetId, int permission ) - { - SAclDao aclDao = new SAclDao(); - S3AccessControlList defaultAcl = new S3AccessControlList(); - - // -> if an annoymous request, then do not rewrite the ACL - String userId = UserContext.current().getCanonicalUserId(); - if (0 < userId.length()) - { - S3Grant defaultGrant = new S3Grant(); - defaultGrant.setGrantee(SAcl.GRANTEE_USER); - defaultGrant.setCanonicalUserID( userId ); - defaultGrant.setPermission( permission ); - defaultAcl.addGrant( defaultGrant ); - aclDao.save( target, targetId, defaultAcl ); - } - } -<<<<<<< HEAD - - /** - * Note that we use the Cloud Stack API Access key for the Canonical User Id everywhere - * (i.e., for buckets, and objects). -======= - - - /** - * The Cloud Stack API Access key is used for for the Canonical User Id everywhere (buckets and objects). ->>>>>>> 6472e7b... Now really adding the renamed files! 
- * - * @param owner - this can be the Cloud Access Key for a bucket owner or one of the - * following special symbols: - * (a) '*' - any principal authenticated user (i.e., any user with a registered Cloud Access Key) - * (b) 'A' - any anonymous principal (i.e., S3 request without an Authorization header) - */ - private void setDefaultAcls( String target, long objectId, int permission1, int permission2, String owner ) - { - SAclDao aclDao = new SAclDao(); - S3AccessControlList defaultAcl = new S3AccessControlList(); - - // -> object owner - S3Grant defaultGrant = new S3Grant(); - defaultGrant.setGrantee(SAcl.GRANTEE_USER); - defaultGrant.setCanonicalUserID( UserContext.current().getCanonicalUserId()); - defaultGrant.setPermission( permission1 ); - defaultAcl.addGrant( defaultGrant ); - - // -> bucket owner - defaultGrant = new S3Grant(); - defaultGrant.setGrantee(SAcl.GRANTEE_USER); - defaultGrant.setCanonicalUserID( owner ); - defaultGrant.setPermission( permission2 ); - defaultAcl.addGrant( defaultGrant ); - aclDao.save( target, objectId, defaultAcl ); - } - - public static PolicyAccess verifyPolicy( S3PolicyContext context ) - { - S3BucketPolicy policy = null; - -<<<<<<< HEAD - // -> on error of getting a policy ignore it -======= - // Ordinarily a REST request will pass in an S3PolicyContext for a given bucket by this stage. The HttpServletRequest object - // should be held in the UserContext ready for extraction of the S3BucketPolicy. - // If there is an error in obtaining the request object or in loading the policy then log the failure and return a S3PolicyContext - // which indicates DEFAULT_DENY. Where there is no failure, the policy returned should be specific to the Canonical User ID of the requester. - ->>>>>>> 6472e7b... Now really adding the renamed files! 
- try { - // -> in SOAP the HttpServletRequest object is hidden and not passed around - if (null != context) { - context.setHttp( UserContext.current().getHttp()); - policy = loadPolicy( context ); - } - - if ( null != policy ) - return policy.eval(context, UserContext.current().getCanonicalUserId()); - else return PolicyAccess.DEFAULT_DENY; - } - catch( Exception e ) { - logger.error("verifyAccess - loadPolicy failed, bucket: " + context.getBucketName() + " policy ignored", e); - return PolicyAccess.DEFAULT_DENY; - } - } - - /** - * To determine access to a bucket or an object in a bucket evaluate first a define - * bucket policy and then any defined ACLs. - * - * @param context - all data needed for bucket policies - * @param target - used for ACL evaluation, object identifier - * @param targetId - used for ACL evaluation - * @param requestedPermission - ACL type access requested - * - * @throws ParseException, SQLException, ClassNotFoundException, IllegalAccessException, InstantiationException - */ - public static void verifyAccess( S3PolicyContext context, String target, long targetId, int requestedPermission ) - { - switch( verifyPolicy( context ) ) { - case ALLOW: // overrides ACLs (?) - return; - - case DENY: - throw new PermissionDeniedException( "Access Denied - bucket policy DENY result" ); - - case DEFAULT_DENY: - default: - accessAllowed( target, targetId, requestedPermission ); - break; - } - } - - /** -<<<<<<< HEAD - * This function verifies that the accessing client has the requested - * permission on the object/bucket/Acl represented by the tuble: - * - * For cases where an ACL is meant for any authenticated user we place a "*" for the - * Canonical User Id ("*" is not a legal Cloud Stack Access key). - * - * For cases where an ACL is meant for any anonymous user (or 'AllUsers') we place a "A" for the - * Canonical User Id ("A" is not a legal Cloud Stack Access key). 
-======= - * This method verifies that the accessing client has the requested - * permission on the object/bucket/Acl represented by the tuple: - * - * For cases where an ACL is meant for any authenticated user we place a "*" for the - * Canonical User Id. N.B. - "*" is not a legal Cloud (Bridge) Access key. - * - * For cases where an ACL is meant for any anonymous user (or 'AllUsers') we place a "A" for the - * Canonical User Id. N.B. - "A" is not a legal Cloud (Bridge) Access key. ->>>>>>> 6472e7b... Now really adding the renamed files! - */ - public static void accessAllowed( String target, long targetId, int requestedPermission ) - { - if (SAcl.PERMISSION_PASS == requestedPermission) return; - - SAclDao aclDao = new SAclDao(); - -<<<<<<< HEAD - // -> if an annoymous request, then canonicalUserId is an empty string - String userId = UserContext.current().getCanonicalUserId(); - if ( 0 == userId.length()) - { - // -> is an anonymous principal ACL set for this ? - if (hasPermission( aclDao.listGrants( target, targetId, "A" ), requestedPermission )) return; - } - else - { // -> no priviledges means no access allowed - if (hasPermission( aclDao.listGrants( target, targetId, userId ), requestedPermission )) return; - - // -> or maybe there is any principal authenticated ACL set for this ? - if (hasPermission( aclDao.listGrants( target, targetId, "*" ), requestedPermission )) return; - } -======= - // If an annoymous request, then canonicalUserId is an empty string - String userId = UserContext.current().getCanonicalUserId(); - if ( 0 == userId.length()) - { - // Is an anonymous principal ACL set for this ? - if (hasPermission( aclDao.listGrants( target, targetId, "A" ), requestedPermission )) return; - } - else - { - if (hasPermission( aclDao.listGrants( target, targetId, userId ), requestedPermission )) return; - // Or alternatively is there is any principal authenticated ACL set for this ? 
- if (hasPermission( aclDao.listGrants( target, targetId, "*" ), requestedPermission )) return; - } - // No privileges implies that no access is allowed in the case of an anonymous user ->>>>>>> 6472e7b... Now really adding the renamed files! - throw new PermissionDeniedException( "Access Denied - ACLs do not give user the required permission" ); - } - - /** -<<<<<<< HEAD - * This function assumes that the bucket has been tested to make sure it exists before -======= - * This method assumes that the bucket has been tested to make sure it exists before ->>>>>>> 6472e7b... Now really adding the renamed files! - * it is called. - * - * @param context - * @return S3BucketPolicy - * @throws SQLException, ClassNotFoundException, IllegalAccessException, InstantiationException, ParseException - */ - public static S3BucketPolicy loadPolicy( S3PolicyContext context ) - throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException, ParseException - { -<<<<<<< HEAD - Tuple result = ServiceProvider.getInstance().getBucketPolicy( context.getBucketName()); -======= - OrderedPair result = ServiceProvider.getInstance().getBucketPolicy( context.getBucketName()); ->>>>>>> 6472e7b... Now really adding the renamed files! - S3BucketPolicy policy = result.getFirst(); - if ( null == policy ) - { - // -> do we have to load it from the database (any other value means there is no policy)? 
- if (-1 == result.getSecond().intValue()) - { - BucketPolicyDao policyDao = new BucketPolicyDao(); - String policyInJson = policyDao.getPolicy( context.getBucketName()); - // -> place in cache that no policy exists in the database - if (null == policyInJson) { - ServiceProvider.getInstance().setBucketPolicy(context.getBucketName(), null); - return null; - } - - PolicyParser parser = new PolicyParser(); - policy = parser.parse( policyInJson, context.getBucketName()); - if (null != policy) - ServiceProvider.getInstance().setBucketPolicy(context.getBucketName(), policy); - } - } - return policy; - } - - public static void verifyBucketName( String bucketName, boolean useDNSGuidelines ) throws InvalidBucketName - { - // [A] To comply with Amazon S3 basic requirements, bucket names must meet the following conditions - // -> must be between 3 and 255 characters long - int size = bucketName.length(); - if (3 > size || size > 255) - throw new InvalidBucketName( bucketName + " is not between 3 and 255 characters long" ); - - // -> must start with a number or letter - if (!Character.isLetterOrDigit( bucketName.charAt( 0 ))) - throw new InvalidBucketName( bucketName + " does not start with a number or letter" ); - - // -> can contain lowercase letters, numbers, periods (.), underscores (_), and dashes (-) - // -> the bucket name can also contain uppercase letters but it is not recommended - for( int i=0; i < bucketName.length(); i++ ) - { - char next = bucketName.charAt(i); - if (Character.isLetter( next )) continue; - else if (Character.isDigit( next )) continue; - else if ('.' == next) continue; - else if ('_' == next) continue; - else if ('-' == next) continue; - else throw new InvalidBucketName( bucketName + " contains the invalid character: " + next ); - } - - // -> must not be formatted as an IP address (e.g., 192.168.5.4) - String[] parts = bucketName.split( "\\." 
); - if (4 == parts.length) - { - try { - int first = Integer.parseInt( parts[0] ); - int second = Integer.parseInt( parts[1] ); - int third = Integer.parseInt( parts[2] ); - int fourth = Integer.parseInt( parts[3] ); - throw new InvalidBucketName( bucketName + " is formatted as an IP address" ); - } -<<<<<<< HEAD - catch( NumberFormatException e ) {} -======= - catch( NumberFormatException e ) - {throw new InvalidBucketName( bucketName);} ->>>>>>> 6472e7b... Now really adding the renamed files! - } - - - // [B] To conform with DNS requirements, Amazon recommends following these additional guidelines when creating buckets - // -> bucket names should be between 3 and 63 characters long - if (useDNSGuidelines) - { - // -> bucket names should be between 3 and 63 characters long - if (3 > size || size > 63) - throw new InvalidBucketName( "DNS requiremens, bucket name: " + bucketName + " is not between 3 and 63 characters long" ); - - // -> bucket names should not contain underscores (_) - int pos = bucketName.indexOf( '_' ); - if (-1 != pos) - throw new InvalidBucketName( "DNS requiremens, bucket name: " + bucketName + " should not contain underscores" ); - - // -> bucket names should not end with a dash - if (bucketName.endsWith( "-" )) - throw new InvalidBucketName( "DNS requiremens, bucket name: " + bucketName + " should not end with a dash" ); - - // -> bucket names cannot contain two, adjacent periods - pos = bucketName.indexOf( ".." ); - if (-1 != pos) - throw new InvalidBucketName( "DNS requiremens, bucket name: " + bucketName + " should not contain \"..\"" ); - - // -> bucket names cannot contain dashes next to periods (e.g., "my-.bucket.com" and "my.-bucket" are invalid) - if (-1 != bucketName.indexOf( "-." 
) || -1 != bucketName.indexOf( ".-" )) - throw new InvalidBucketName( "DNS requiremens, bucket name: " + bucketName + " should not contain \".-\" or \"-.\"" ); - } - } - -<<<<<<< HEAD - private static boolean hasPermission( List priviledges, int requestedPermission ) - { - ListIterator it = priviledges.listIterator(); - while( it.hasNext()) - { - // -> is the requested permission "contained" in one or the granted rights for this user -======= - private static boolean hasPermission( List privileges, int requestedPermission ) - { - ListIterator it = privileges.listIterator(); - while( it.hasNext()) - { - // True providing the requested permission is contained in one or the granted rights for this user. False otherwise. ->>>>>>> 6472e7b... Now really adding the renamed files! - SAcl rights = (SAcl)it.next(); - int permission = rights.getPermission(); - if (requestedPermission == (permission & requestedPermission)) return true; - } - return false; - } - - /** -<<<<<<< HEAD - * ifRange is true and IfUnmodifiedSince or IfMatch fails then we return the entire object (indicated by -======= - * ifRange is true and ifUnmodifiedSince or IfMatch fails then we return the entire object (indicated by ->>>>>>> 6472e7b... Now really adding the renamed files! - * returning a -1 as the function result. - * - * @param ifCond - conditional get defined by these tests - * @param lastModified - value used on ifModifiedSince or ifUnmodifiedSince - * @param ETag - value used on ifMatch and ifNoneMatch -<<<<<<< HEAD - * @param ifRange - using an If-Range HTTP functionality -======= - * @param ifRange - using an if-Range HTTP functionality ->>>>>>> 6472e7b... Now really adding the renamed files! 
- * @return -1 means return the entire object with an HTTP 200 (not a subrange) - */ - private int conditionPassed( S3ConditionalHeaders ifCond, Date lastModified, String ETag, boolean ifRange ) - { - if (null == ifCond) return 200; - - if (0 > ifCond.ifModifiedSince( lastModified )) - return 304; - - if (0 > ifCond.ifUnmodifiedSince( lastModified )) - return (ifRange ? -1 : 412); - - if (0 > ifCond.ifMatchEtag( ETag )) - return (ifRange ? -1 : 412); - - if (0 > ifCond.ifNoneMatchEtag( ETag )) - return 412; - - return 200; - } -} +/* + * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.cloud.bridge.service.core.s3; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.Date; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.ListIterator; +import java.util.Map; +import java.util.Random; +import java.util.Set; +import java.util.TimeZone; +import java.util.UUID; + +import javax.servlet.http.HttpServletResponse; + +import org.apache.log4j.Logger; +import org.hibernate.LockMode; +import org.hibernate.Session; +import org.json.simple.parser.ParseException; + +import com.cloud.bridge.io.S3FileSystemBucketAdapter; +import com.cloud.bridge.model.MHost; +import com.cloud.bridge.model.MHostMount; +import com.cloud.bridge.model.SAcl; +import com.cloud.bridge.model.SBucket; +import com.cloud.bridge.model.SHost; +import com.cloud.bridge.model.SMeta; +import com.cloud.bridge.model.SObject; +import com.cloud.bridge.model.SObjectItem; +import com.cloud.bridge.persist.PersistContext; +import com.cloud.bridge.persist.dao.BucketPolicyDao; +import com.cloud.bridge.persist.dao.MHostDao; +import com.cloud.bridge.persist.dao.MHostMountDao; +import com.cloud.bridge.persist.dao.MultipartLoadDao; +import com.cloud.bridge.persist.dao.SAclDao; +import com.cloud.bridge.persist.dao.SBucketDao; +import com.cloud.bridge.persist.dao.SHostDao; +import com.cloud.bridge.persist.dao.SMetaDao; +import com.cloud.bridge.persist.dao.SObjectDao; +import com.cloud.bridge.persist.dao.SObjectItemDao; +import com.cloud.bridge.service.S3Constants; +import com.cloud.bridge.service.UserContext; +import com.cloud.bridge.service.controller.s3.ServiceProvider; +import com.cloud.bridge.service.core.s3.S3BucketPolicy.PolicyAccess; +import com.cloud.bridge.service.core.s3.S3CopyObjectRequest.MetadataDirective; +import com.cloud.bridge.service.core.s3.S3PolicyAction.PolicyActions; +import 
com.cloud.bridge.service.core.s3.S3PolicyCondition.ConditionKeys; +import com.cloud.bridge.service.exception.HostNotMountedException; +import com.cloud.bridge.service.exception.InternalErrorException; +import com.cloud.bridge.service.exception.InvalidBucketName; +import com.cloud.bridge.service.exception.NoSuchObjectException; +import com.cloud.bridge.service.exception.ObjectAlreadyExistsException; +import com.cloud.bridge.service.exception.OutOfServiceException; +import com.cloud.bridge.service.exception.OutOfStorageException; +import com.cloud.bridge.service.exception.PermissionDeniedException; +import com.cloud.bridge.service.exception.UnsupportedException; +import com.cloud.bridge.util.DateHelper; +import com.cloud.bridge.util.PolicyParser; +import com.cloud.bridge.util.StringHelper; +import com.cloud.bridge.util.OrderedPair; +import com.cloud.bridge.util.Triple; + +/** + * @author Kelven Yang, John Zucker + * The CRUD control actions to be invoked from S3BucketAction or S3ObjectAction. + */ +public class S3Engine { + protected final static Logger logger = Logger.getLogger(S3Engine.class); + + private final int LOCK_ACQUIRING_TIMEOUT_SECONDS = 10; // ten seconds + + private final Map bucketAdapters = new HashMap(); + + public S3Engine() { + bucketAdapters.put(SHost.STORAGE_HOST_TYPE_LOCAL, new S3FileSystemBucketAdapter()); + } + + + /** + * Return a S3CopyObjectResponse which represents an object being copied from source + * to destination bucket. + * Called from S3ObjectAction when copying an object. + * This can be treated as first a GET followed by a PUT of the object the user wants to copy. 
+ */ + + public S3CopyObjectResponse handleRequest(S3CopyObjectRequest request) + { + S3CopyObjectResponse response = new S3CopyObjectResponse(); + + // [A] Get the object we want to copy + S3GetObjectRequest getRequest = new S3GetObjectRequest(); + getRequest.setBucketName(request.getSourceBucketName()); + getRequest.setKey(request.getSourceKey()); + getRequest.setVersion(request.getVersion()); + getRequest.setConditions( request.getConditions()); + + getRequest.setInlineData( true ); + getRequest.setReturnData( true ); + if ( MetadataDirective.COPY == request.getDirective()) + getRequest.setReturnMetadata( true ); + else getRequest.setReturnMetadata( false ); + + //-> before we do anything verify the permissions on a copy basis + String destinationBucketName = request.getDestinationBucketName(); + String destinationKeyName = request.getDestinationKey(); + S3PolicyContext context = new S3PolicyContext( PolicyActions.PutObject, destinationBucketName ); + context.setKeyName( destinationKeyName ); + context.setEvalParam( ConditionKeys.MetaData, request.getDirective().toString()); + context.setEvalParam( ConditionKeys.CopySource, "/" + request.getSourceBucketName() + "/" + request.getSourceKey()); + if (PolicyAccess.DENY == verifyPolicy( context )) + throw new PermissionDeniedException( "Access Denied - bucket policy DENY result" ); + + S3GetObjectResponse originalObject = handleRequest(getRequest); + int resultCode = originalObject.getResultCode(); + if (200 != resultCode) { + response.setResultCode( resultCode ); + response.setResultDescription( originalObject.getResultDescription()); + return response; + } + + response.setCopyVersion( originalObject.getVersion()); + + + // [B] Put the object into the destination bucket + S3PutObjectInlineRequest putRequest = new S3PutObjectInlineRequest(); + putRequest.setBucketName(request.getDestinationBucketName()) ; + putRequest.setKey(destinationKeyName); + if ( MetadataDirective.COPY == request.getDirective()) + 
putRequest.setMetaEntries(originalObject.getMetaEntries()); + else putRequest.setMetaEntries(request.getMetaEntries()); + putRequest.setAcl(request.getAcl()); // -> if via a SOAP call + putRequest.setCannedAccess(request.getCannedAccess()); // -> if via a REST call + putRequest.setContentLength(originalObject.getContentLength()); + putRequest.setData(originalObject.getData()); + + S3PutObjectInlineResponse putResp = handleRequest(putRequest); + response.setResultCode( putResp.resultCode ); + response.setResultDescription( putResp.getResultDescription()); + response.setETag( putResp.getETag()); + response.setLastModified( putResp.getLastModified()); + response.setPutVersion( putResp.getVersion()); + return response; + } + + public S3CreateBucketResponse handleRequest(S3CreateBucketRequest request) + { + S3CreateBucketResponse response = new S3CreateBucketResponse(); + String cannedAccessPolicy = request.getCannedAccess(); + String bucketName = request.getBucketName(); + response.setBucketName( bucketName ); + + verifyBucketName( bucketName, false ); + + S3PolicyContext context = new S3PolicyContext( PolicyActions.CreateBucket, bucketName ); + context.setEvalParam( ConditionKeys.Acl, cannedAccessPolicy ); + if (PolicyAccess.DENY == verifyPolicy( context )) + throw new PermissionDeniedException( "Access Denied - bucket policy DENY result" ); + + if (PersistContext.acquireNamedLock("bucket.creation", LOCK_ACQUIRING_TIMEOUT_SECONDS)) + { + OrderedPair shost_storagelocation_pair = null; + boolean success = false; + try { + SBucketDao bucketDao = new SBucketDao(); + SAclDao aclDao = new SAclDao(); + + if (bucketDao.getByName(request.getBucketName()) != null) + throw new ObjectAlreadyExistsException("Bucket already exists"); + + shost_storagelocation_pair = allocBucketStorageHost(request.getBucketName(), null); + + SBucket sbucket = new SBucket(); + sbucket.setName(request.getBucketName()); + sbucket.setCreateTime(DateHelper.currentGMTTime()); + 
sbucket.setOwnerCanonicalId( UserContext.current().getCanonicalUserId()); + sbucket.setShost(shost_storagelocation_pair.getFirst()); + shost_storagelocation_pair.getFirst().getBuckets().add(sbucket); + bucketDao.save(sbucket); + + S3AccessControlList acl = request.getAcl(); + + if ( null != cannedAccessPolicy ) + setCannedAccessControls( cannedAccessPolicy, "SBucket", sbucket.getId(), sbucket ); + else if (null != acl) + aclDao.save( "SBucket", sbucket.getId(), acl ); + else setSingleAcl( "SBucket", sbucket.getId(), SAcl.PERMISSION_FULL ); + + // explicitly commit the transaction + PersistContext.commitTransaction(); + success = true; + } + finally + { + if(!success && shost_storagelocation_pair != null) { + S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(shost_storagelocation_pair.getFirst()); + bucketAdapter.deleteContainer(shost_storagelocation_pair.getSecond(), request.getBucketName()); + } + PersistContext.releaseNamedLock("bucket.creation"); + } + + } else { + throw new OutOfServiceException("Unable to acquire synchronization lock"); + } + + return response; + } + + /** + * Return a S3Response which represents the effect of a bucket being deleted. + * Called from S3BucketAction when deleting a bucket.
+ */ + + public S3Response handleRequest( S3DeleteBucketRequest request ) + { + S3Response response = new S3Response(); + SBucketDao bucketDao = new SBucketDao(); + String bucketName = request.getBucketName(); + SBucket sbucket = bucketDao.getByName( bucketName ); + + if ( sbucket != null ) + { + S3PolicyContext context = new S3PolicyContext( PolicyActions.DeleteBucket, bucketName ); + switch( verifyPolicy( context )) + { + case ALLOW: + // The bucket policy can give users permission to delete a bucket whereas ACLs cannot + break; + + case DENY: + throw new PermissionDeniedException( "Access Denied - bucket policy DENY result" ); + + case DEFAULT_DENY: + default: + // Irrespective of what the ACLs say, only the owner can delete a bucket + String client = UserContext.current().getCanonicalUserId(); + if (!client.equals( sbucket.getOwnerCanonicalId())) { + throw new PermissionDeniedException( "Access Denied - only the owner can delete a bucket" ); + } + break; + } + + + // Delete the file from its storage location + OrderedPair host_storagelocation_pair = getBucketStorageHost(sbucket); + S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst()); + bucketAdapter.deleteContainer(host_storagelocation_pair.getSecond(), request.getBucketName()); + + // Cascade-deleting can delete related SObject/SObjectItem objects, but not SAcl, SMeta and policy objects. 
+ // To delete SMeta & SAcl objects: + // (1)Get all the objects in the bucket, + // (2)then all the items in each object, + // (3) then all meta & acl data for each item + Set objectsInBucket = sbucket.getObjectsInBucket(); + Iterator it = objectsInBucket.iterator(); + while( it.hasNext()) + { + SObject oneObject = (SObject)it.next(); + Set itemsInObject = oneObject.getItems(); + Iterator is = itemsInObject.iterator(); + while( is.hasNext()) + { + SObjectItem oneItem = (SObjectItem)is.next(); + deleteMetaData( oneItem.getId()); + deleteObjectAcls( "SObjectItem", oneItem.getId()); + } + } + + // Delete all the policy state associated with the bucket + try { + ServiceProvider.getInstance().deleteBucketPolicy( bucketName ); + BucketPolicyDao policyDao = new BucketPolicyDao(); + policyDao.deletePolicy( bucketName ); + } + catch( Exception e ) { + logger.error("When deleting a bucket we must try to delete its policy: ", e); + } + + deleteBucketAcls( sbucket.getId()); + bucketDao.delete( sbucket ); + response.setResultCode(204); + response.setResultDescription("OK"); + } + else + { response.setResultCode(404); + response.setResultDescription("Bucket does not exist"); + } + return response; + } + + /** + * Return a S3ListBucketResponse which represents a list of up to 1000 objects contained in the bucket. + * Called from S3BucketAction for GETting objects and for GETting object versions.
+ */ + + public S3ListBucketResponse listBucketContents(S3ListBucketRequest request, boolean includeVersions) + { + S3ListBucketResponse response = new S3ListBucketResponse(); + String bucketName = request.getBucketName(); + String prefix = request.getPrefix(); + if (prefix == null) prefix = StringHelper.EMPTY_STRING; + String marker = request.getMarker(); + if (marker == null) marker = StringHelper.EMPTY_STRING; + + String delimiter = request.getDelimiter(); + int maxKeys = request.getMaxKeys(); + if(maxKeys <= 0) maxKeys = 1000; + + SBucketDao bucketDao = new SBucketDao(); + SBucket sbucket = bucketDao.getByName(bucketName); + if (sbucket == null) throw new NoSuchObjectException("Bucket " + bucketName + " does not exist"); + + PolicyActions action = (includeVersions ? PolicyActions.ListBucketVersions : PolicyActions.ListBucket); + S3PolicyContext context = new S3PolicyContext( action, bucketName ); + context.setEvalParam( ConditionKeys.MaxKeys, new String( "" + maxKeys )); + context.setEvalParam( ConditionKeys.Prefix, prefix ); + context.setEvalParam( ConditionKeys.Delimiter, delimiter ); + verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_READ ); + + + // When executing the query, request one more item so that we know how to set isTruncated flag + SObjectDao sobjectDao = new SObjectDao(); + List l = null; + + if ( includeVersions ) + l = sobjectDao.listAllBucketObjects( sbucket, prefix, marker, maxKeys+1 ); + else l = sobjectDao.listBucketObjects( sbucket, prefix, marker, maxKeys+1 ); + + response.setBucketName(bucketName); + response.setMarker(marker); + response.setMaxKeys(maxKeys); + response.setPrefix(prefix); + response.setDelimiter(delimiter); + response.setTruncated(l.size() > maxKeys); + if(l.size() > maxKeys) { + response.setNextMarker(l.get(l.size() - 1).getNameKey()); + } + + // If needed - SOAP response does not support versioning + response.setContents( composeListBucketContentEntries(l, prefix, delimiter, maxKeys, includeVersions,
request.getVersionIdMarker())); + response.setCommonPrefixes( composeListBucketPrefixEntries(l, prefix, delimiter, maxKeys)); + return response; + } + + /** + * Return a S3ListAllMyBucketResponse which represents a list of all buckets owned by the requester. + * Called from S3BucketAction for GETting all buckets. + * To check on bucket policies defined we have to (look for and) evaluate the policy on each + * bucket the user owns. + */ + public S3ListAllMyBucketsResponse handleRequest(S3ListAllMyBucketsRequest request) + { + S3ListAllMyBucketsResponse response = new S3ListAllMyBucketsResponse(); + SBucketDao bucketDao = new SBucketDao(); + + // "...you can only list buckets for which you are the owner." + List buckets = bucketDao.listBuckets(UserContext.current().getCanonicalUserId()); + S3CanonicalUser owner = new S3CanonicalUser(); + owner.setID(UserContext.current().getCanonicalUserId()); + owner.setDisplayName(""); + response.setOwner(owner); + + if (buckets != null) + { + S3ListAllMyBucketsEntry[] entries = new S3ListAllMyBucketsEntry[buckets.size()]; + int i = 0; + for(SBucket bucket : buckets) + { + String bucketName = bucket.getName(); + S3PolicyContext context = new S3PolicyContext( PolicyActions.ListAllMyBuckets, bucketName ); + verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_PASS ); + + entries[i] = new S3ListAllMyBucketsEntry(); + entries[i].setName(bucketName); + entries[i].setCreationDate(DateHelper.toCalendar(bucket.getCreateTime())); + i++; + } + response.setBuckets(entries); + } + return response; + } + + /** + * Return an S3Response representing the result of PUTTING the ACL of a given bucket. + * Called from S3BucketAction to PUT its ACL. 
+ */ + + public S3Response handleRequest(S3SetBucketAccessControlPolicyRequest request) + { + S3Response response = new S3Response(); + SBucketDao bucketDao = new SBucketDao(); + String bucketName = request.getBucketName(); + SBucket sbucket = bucketDao.getByName(bucketName); + if(sbucket == null) { + response.setResultCode(404); + response.setResultDescription("Bucket does not exist"); + return response; + } + + S3PolicyContext context = new S3PolicyContext( PolicyActions.PutBucketAcl, bucketName ); + verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_WRITE_ACL ); + + SAclDao aclDao = new SAclDao(); + aclDao.save("SBucket", sbucket.getId(), request.getAcl()); + + response.setResultCode(200); + response.setResultDescription("OK"); + return response; + } + + + /** + * Return a S3AccessControlPolicy representing the ACL of a given bucket. + * Called from S3BucketAction to GET its ACL. + */ + + public S3AccessControlPolicy handleRequest(S3GetBucketAccessControlPolicyRequest request) + { + S3AccessControlPolicy policy = new S3AccessControlPolicy(); + SBucketDao bucketDao = new SBucketDao(); + String bucketName = request.getBucketName(); + SBucket sbucket = bucketDao.getByName( bucketName ); + if (sbucket == null) + throw new NoSuchObjectException("Bucket " + bucketName + " does not exist"); + + S3CanonicalUser owner = new S3CanonicalUser(); + owner.setID(sbucket.getOwnerCanonicalId()); + owner.setDisplayName(""); + policy.setOwner(owner); + + S3PolicyContext context = new S3PolicyContext( PolicyActions.GetBucketAcl, bucketName ); + verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_READ_ACL ); + + SAclDao aclDao = new SAclDao(); + List grants = aclDao.listGrants("SBucket", sbucket.getId()); + policy.setGrants(S3Grant.toGrants(grants)); + return policy; + } + + /** + * This method should be called if a multipart upload is aborted OR has completed successfully and + * the individual parts have to be cleaned up. 
+ * Called from S3ObjectAction when executing at completion or when aborting multipart upload. + * @param bucketName + * @param uploadId + * @param verifyPermission - If false then do not check the user's permission to clean up the state + */ + public int freeUploadParts(String bucketName, int uploadId, boolean verifyPermission) + { + // -> we need to look up the final bucket to figure out which mount point to use to save the part in + SBucketDao bucketDao = new SBucketDao(); + SBucket bucket = bucketDao.getByName(bucketName); + if (bucket == null) { + logger.error( "initiateMultipartUpload failed since " + bucketName + " does not exist" ); + return 404; + } + + OrderedPair host_storagelocation_pair = getBucketStorageHost(bucket); + S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst()); + + try { + MultipartLoadDao uploadDao = new MultipartLoadDao(); + OrderedPair exists = uploadDao.multipartExits( uploadId ); + if (null == exists) { + logger.error( "initiateMultipartUpload failed since multipart upload" + uploadId + " does not exist" ); + return 404; + } + + // -> the multipart initiator or bucket owner can do this action by default + if (verifyPermission) + { + String initiator = uploadDao.getInitiator( uploadId ); + if (null == initiator || !initiator.equals( UserContext.current().getAccessKey())) + { + // -> write permission on a bucket allows a PutObject / DeleteObject action on any object in the bucket + S3PolicyContext context = new S3PolicyContext( PolicyActions.AbortMultipartUpload, bucketName ); + context.setKeyName( exists.getSecond()); + verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE ); + } + } + + // -> first get a list of all the uploaded files and delete one by one + S3MultipartPart[] parts = uploadDao.getParts( uploadId, 10000, 0 ); + for( int i=0; i < parts.length; i++ ) + { + bucketAdapter.deleteObject( host_storagelocation_pair.getSecond(), 
ServiceProvider.getInstance().getMultipartDir(), parts[i].getPath()); + } + + uploadDao.deleteUpload( uploadId ); + return 204; + + } + catch( PermissionDeniedException e ) { + logger.error("freeUploadParts failed due to [" + e.getMessage() + "]", e); + throw e; + } + catch (Exception e) { + logger.error("freeUploadParts failed due to [" + e.getMessage() + "]", e); + return 500; + } + } + + /** + * The initiator must have permission to write to the bucket in question in order to initiate + * a multipart upload. Also check to make sure the special folder used to store parts of + * a multipart exists for this bucket. + * Called from S3ObjectAction during many stages of multipart upload. + */ + public S3PutObjectInlineResponse initiateMultipartUpload(S3PutObjectInlineRequest request) + { + S3PutObjectInlineResponse response = new S3PutObjectInlineResponse(); + String bucketName = request.getBucketName(); + String nameKey = request.getKey(); + + // -> does the bucket exist and can we write to it? 
+ SBucketDao bucketDao = new SBucketDao(); + SBucket bucket = bucketDao.getByName(bucketName); + if (bucket == null) { + logger.error( "initiateMultipartUpload failed since " + bucketName + " does not exist" ); + response.setResultCode(404); + } + + S3PolicyContext context = new S3PolicyContext( PolicyActions.PutObject, bucketName ); + context.setKeyName( nameKey ); + context.setEvalParam( ConditionKeys.Acl, request.getCannedAccess()); + verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE ); + + createUploadFolder( bucketName ); + + try { + MultipartLoadDao uploadDao = new MultipartLoadDao(); + int uploadId = uploadDao.initiateUpload( UserContext.current().getAccessKey(), bucketName, nameKey, request.getCannedAccess(), request.getMetaEntries()); + response.setUploadId( uploadId ); + response.setResultCode(200); + + } catch( Exception e ) { + logger.error("initiateMultipartUpload exception: ", e); + response.setResultCode(500); + } + + return response; + } + + /** + * Save the object fragment in a special (i.e., hidden) directory inside the same mount point as + * the bucket location that the final object will be stored in. + * Called from S3ObjectAction during many stages of multipart upload. 
+ * @param request + * @param uploadId + * @param partNumber + * @return S3PutObjectInlineResponse + */ + public S3PutObjectInlineResponse saveUploadPart(S3PutObjectInlineRequest request, int uploadId, int partNumber) + { + S3PutObjectInlineResponse response = new S3PutObjectInlineResponse(); + String bucketName = request.getBucketName(); + + // -> we need to look up the final bucket to figure out which mount point to use to save the part in + SBucketDao bucketDao = new SBucketDao(); + SBucket bucket = bucketDao.getByName(bucketName); + if (bucket == null) { + logger.error( "saveUploadedPart failed since " + bucketName + " does not exist" ); + response.setResultCode(404); + } + S3PolicyContext context = new S3PolicyContext( PolicyActions.PutObject, bucketName ); + context.setKeyName( request.getKey()); + verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE ); + + OrderedPair host_storagelocation_pair = getBucketStorageHost(bucket); + S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst()); + String itemFileName = new String( uploadId + "-" + partNumber ); + InputStream is = null; + + try { + is = request.getDataInputStream(); + String md5Checksum = bucketAdapter.saveObject(is, host_storagelocation_pair.getSecond(), ServiceProvider.getInstance().getMultipartDir(), itemFileName); + response.setETag(md5Checksum); + + MultipartLoadDao uploadDao = new MultipartLoadDao(); + uploadDao.savePart( uploadId, partNumber, md5Checksum, itemFileName, (int)request.getContentLength()); + response.setResultCode(200); + + } catch (IOException e) { + logger.error("UploadPart failed due to " + e.getMessage(), e); + response.setResultCode(500); + } catch (OutOfStorageException e) { + logger.error("UploadPart failed due to " + e.getMessage(), e); + response.setResultCode(500); + } catch (Exception e) { + logger.error("UploadPart failed due to " + e.getMessage(), e); + response.setResultCode(500); + } finally { + if(is != null) 
{ + try { + is.close(); + } catch (IOException e) { + logger.error("UploadPart unable to close stream from data handler.", e); + } + } + } + + return response; + } + + /** + * Create the real object represented by all the parts of the multipart upload. + * Called from S3ObjectAction at completion of multipart upload. + * @param httpResp - Servlet response handle to return the headers of the response (including version header) + * @param request - Normal parameters needed to create a new object (including metadata) + * @param parts - List of files that make up the multipart + * @param outputStream - Response output stream + * N.B. - This method can be long-lasting + * We are required to keep the connection alive by returning whitespace characters back periodically. + */ + + public S3PutObjectInlineResponse concatentateMultipartUploads(HttpServletResponse httpResp, S3PutObjectInlineRequest request, S3MultipartPart[] parts, OutputStream outputStream) throws IOException + { + // [A] Set up and initial error checking + S3PutObjectInlineResponse response = new S3PutObjectInlineResponse(); + String bucketName = request.getBucketName(); + String key = request.getKey(); + S3MetaDataEntry[] meta = request.getMetaEntries(); + + SBucketDao bucketDao = new SBucketDao(); + SBucket bucket = bucketDao.getByName(bucketName); + if (bucket == null) { + logger.error( "completeMultipartUpload( failed since " + bucketName + " does not exist" ); + response.setResultCode(404); + } + + // [B] Now we need to create the final re-assembled object + // -> the allocObjectItem checks for the bucket policy PutObject permissions + OrderedPair object_objectitem_pair = allocObjectItem(bucket, key, meta, null, request.getCannedAccess()); + OrderedPair host_storagelocation_pair = getBucketStorageHost(bucket); + + S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst()); + String itemFileName = object_objectitem_pair.getSecond().getStoredPath(); + + // -> Amazon 
defines that we must return a 200 response immediately to the client, but + // -> we don't know the version header until we hit here + httpResp.setStatus(200); + httpResp.setContentType("text/xml; charset=UTF-8"); + String version = object_objectitem_pair.getSecond().getVersion(); + if (null != version) httpResp.addHeader( "x-amz-version-id", version ); + httpResp.flushBuffer(); + + + // [C] Re-assemble the object from its uploaded file parts + try { + // explicit transaction control to avoid holding transaction during long file concatenation process + PersistContext.commitTransaction(); + + OrderedPair result = bucketAdapter. + concatentateObjects + ( host_storagelocation_pair.getSecond(), + bucket.getName(), + itemFileName, + ServiceProvider.getInstance().getMultipartDir(), + parts, + outputStream ); + response.setETag(result.getFirst()); + response.setLastModified(DateHelper.toCalendar( object_objectitem_pair.getSecond().getLastModifiedTime())); + + SObjectItemDao itemDao = new SObjectItemDao(); + SObjectItem item = itemDao.get( object_objectitem_pair.getSecond().getId()); + item.setMd5(result.getFirst()); + item.setStoredSize(result.getSecond().longValue()); + response.setResultCode(200); + + PersistContext.getSession().save(item); + } + catch (Exception e) { + logger.error("completeMultipartUpload failed due to " + e.getMessage(), e); + } + return response; + } + + /** + * Return a S3PutObjectInlineResponse which represents an object being created into a bucket + * Called from S3ObjectAction when PUTting or POSTing an object.
+ */ + + public S3PutObjectInlineResponse handleRequest(S3PutObjectInlineRequest request) + { + S3PutObjectInlineResponse response = new S3PutObjectInlineResponse(); + String bucketName = request.getBucketName(); + String key = request.getKey(); + long contentLength = request.getContentLength(); + S3MetaDataEntry[] meta = request.getMetaEntries(); + S3AccessControlList acl = request.getAcl(); + + SBucketDao bucketDao = new SBucketDao(); + SBucket bucket = bucketDao.getByName(bucketName); + if (bucket == null) throw new NoSuchObjectException("Bucket " + bucketName + " does not exist"); + + + // Is the caller allowed to write the object? + // The allocObjectItem checks for the bucket policy PutObject permissions + OrderedPair object_objectitem_pair = allocObjectItem(bucket, key, meta, acl, request.getCannedAccess()); + OrderedPair host_storagelocation_pair = getBucketStorageHost(bucket); + + S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst()); + String itemFileName = object_objectitem_pair.getSecond().getStoredPath(); + InputStream is = null; + + try { + // explicit transaction control to avoid holding transaction during file-copy process + PersistContext.commitTransaction(); + + is = request.getDataInputStream(); + String md5Checksum = bucketAdapter.saveObject(is, host_storagelocation_pair.getSecond(), bucket.getName(), itemFileName); + response.setETag(md5Checksum); + response.setLastModified(DateHelper.toCalendar( object_objectitem_pair.getSecond().getLastModifiedTime())); + response.setVersion( object_objectitem_pair.getSecond().getVersion()); + + SObjectItemDao itemDao = new SObjectItemDao(); + SObjectItem item = itemDao.get( object_objectitem_pair.getSecond().getId()); + item.setMd5(md5Checksum); + item.setStoredSize(contentLength); + PersistContext.getSession().save(item); + + } catch (IOException e) { + logger.error("PutObjectInline failed due to " + e.getMessage(), e); + } catch (OutOfStorageException e) { + 
logger.error("PutObjectInline failed due to " + e.getMessage(), e); + } finally { + if(is != null) { + try { + is.close(); + } catch (IOException e) { + logger.error("PutObjectInline unable to close stream from data handler.", e); + } + } + } + + return response; + } + + /** + * Return a S3PutObjectResponse which represents an object being created into a bucket + * Called from S3RestServlet when processing a DIME request. + */ + + public S3PutObjectResponse handleRequest(S3PutObjectRequest request) + { + S3PutObjectResponse response = new S3PutObjectResponse(); + String bucketName = request.getBucketName(); + String key = request.getKey(); + long contentLength = request.getContentLength(); + S3MetaDataEntry[] meta = request.getMetaEntries(); + S3AccessControlList acl = request.getAcl(); + + SBucketDao bucketDao = new SBucketDao(); + SBucket bucket = bucketDao.getByName(bucketName); + if(bucket == null) throw new NoSuchObjectException("Bucket " + bucketName + " does not exist"); + + // Is the caller allowed to write the object? 
+ // The allocObjectItem checks for the bucket policy PutObject permissions + OrderedPair object_objectitem_pair = allocObjectItem(bucket, key, meta, acl, null); + OrderedPair host_storagelocation_pair = getBucketStorageHost(bucket); + + S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst()); + String itemFileName = object_objectitem_pair.getSecond().getStoredPath(); + InputStream is = null; + try { + // explicit transaction control to avoid holding transaction during file-copy process + PersistContext.commitTransaction(); + + is = request.getInputStream(); + String md5Checksum = bucketAdapter.saveObject(is, host_storagelocation_pair.getSecond(), bucket.getName(), itemFileName); + response.setETag(md5Checksum); + response.setLastModified(DateHelper.toCalendar( object_objectitem_pair.getSecond().getLastModifiedTime())); + + SObjectItemDao itemDao = new SObjectItemDao(); + SObjectItem item = itemDao.get( object_objectitem_pair.getSecond().getId()); + item.setMd5(md5Checksum); + item.setStoredSize(contentLength); + PersistContext.getSession().save(item); + + } catch (OutOfStorageException e) { + logger.error("PutObject failed due to " + e.getMessage(), e); + } finally { + if(is != null) { + try { + is.close(); + } catch (IOException e) { + logger.error("Unable to close stream from data handler.", e); + } + } + } + + return response; + } + + /** + * The ACL of an object is set at the object version level. By default, PUT sets the ACL of the latest + * version of an object. To set the ACL of a different version, using the versionId subresource. + * Called from S3ObjectAction to PUT an object's ACL. 
+     */
+
+    public S3Response handleRequest(S3SetObjectAccessControlPolicyRequest request)
+    {
+        S3PolicyContext context = null;
+
+        // [A] First find the object in the bucket
+        S3Response response = new S3Response();
+        SBucketDao bucketDao = new SBucketDao();
+        String bucketName = request.getBucketName();
+        SBucket sbucket = bucketDao.getByName( bucketName );
+        if(sbucket == null) {
+            response.setResultCode(404);
+            // Fix: message previously read "<name>does not exist" (missing space)
+            response.setResultDescription("Bucket " + bucketName + " does not exist");
+            return response;
+        }
+
+        SObjectDao sobjectDao = new SObjectDao();
+        String nameKey = request.getKey();
+        SObject sobject = sobjectDao.getByNameKey( sbucket, nameKey );
+        if(sobject == null) {
+            response.setResultCode(404);
+            response.setResultDescription("Object " + request.getKey() + " in bucket " + bucketName + " does not exist");
+            return response;
+        }
+
+        String deletionMark = sobject.getDeletionMark();
+        if (null != deletionMark) {
+            response.setResultCode(404);
+            response.setResultDescription("Object " + request.getKey() + " has been deleted (1)");
+            return response;
+        }
+
+
+        // [B] Versioning allows the client to ask for a specific version, not just the latest
+        SObjectItem item = null;
+        int versioningStatus = sbucket.getVersioningStatus();
+        String wantVersion = request.getVersion();
+        if ( SBucket.VERSIONING_ENABLED == versioningStatus && null != wantVersion)
+             item = sobject.getVersion( wantVersion );
+        else item = sobject.getLatestVersion(( SBucket.VERSIONING_ENABLED != versioningStatus ));
+
+        if (item == null) {
+            response.setResultCode(404);
+            response.setResultDescription("Object " + request.getKey() + " has been deleted (2)");
+            return response;
+        }
+
+        if ( SBucket.VERSIONING_ENABLED == versioningStatus ) {
+             context = new S3PolicyContext( PolicyActions.PutObjectAclVersion, bucketName );
+             context.setEvalParam( ConditionKeys.VersionId, wantVersion );
+             response.setVersion( item.getVersion());
+        }
+        else context = new S3PolicyContext( PolicyActions.PutObjectAcl,
bucketName ); + context.setKeyName( nameKey ); + verifyAccess( context, "SObjectItem", item.getId(), SAcl.PERMISSION_WRITE_ACL ); + + // -> the acl always goes on the instance of the object + SAclDao aclDao = new SAclDao(); + aclDao.save("SObjectItem", item.getId(), request.getAcl()); + + response.setResultCode(200); + response.setResultDescription("OK"); + return response; + } + + /** + * By default, GET returns ACL information about the latest version of an object. To return ACL + * information about a different version, use the versionId subresource + * Called from S3ObjectAction to get an object's ACL. + */ + + public S3AccessControlPolicy handleRequest(S3GetObjectAccessControlPolicyRequest request) + { + S3PolicyContext context = null; + + // [A] Does the object exist that holds the ACL we are looking for? + S3AccessControlPolicy policy = new S3AccessControlPolicy(); + SBucketDao bucketDao = new SBucketDao(); + String bucketName = request.getBucketName(); + SBucket sbucket = bucketDao.getByName( bucketName ); + if (sbucket == null) + throw new NoSuchObjectException("Bucket " + bucketName + " does not exist"); + + SObjectDao sobjectDao = new SObjectDao(); + String nameKey = request.getKey(); + SObject sobject = sobjectDao.getByNameKey( sbucket, nameKey ); + if (sobject == null) + throw new NoSuchObjectException("Object " + request.getKey() + " does not exist"); + + String deletionMark = sobject.getDeletionMark(); + if (null != deletionMark) { + policy.setResultCode(404); + policy.setResultDescription("Object " + request.getKey() + " has been deleted (1)"); + return policy; + } + + + // [B] Versioning allow the client to ask for a specific version not just the latest + SObjectItem item = null; + int versioningStatus = sbucket.getVersioningStatus(); + String wantVersion = request.getVersion(); + if ( SBucket.VERSIONING_ENABLED == versioningStatus && null != wantVersion) + item = sobject.getVersion( wantVersion ); + else item = sobject.getLatestVersion(( 
SBucket.VERSIONING_ENABLED != versioningStatus )); + + if (item == null) { + policy.setResultCode(404); + policy.setResultDescription("Object " + request.getKey() + " has been deleted (2)"); + return policy; + } + + if ( SBucket.VERSIONING_ENABLED == versioningStatus ) { + context = new S3PolicyContext( PolicyActions.GetObjectVersionAcl, bucketName ); + context.setEvalParam( ConditionKeys.VersionId, wantVersion ); + policy.setVersion( item.getVersion()); + } + else context = new S3PolicyContext( PolicyActions.GetObjectAcl, bucketName ); + context.setKeyName( nameKey ); + verifyAccess( context, "SObjectItem", item.getId(), SAcl.PERMISSION_READ_ACL ); + + + // [C] ACLs are ALWAYS on an instance of the object + S3CanonicalUser owner = new S3CanonicalUser(); + owner.setID(sobject.getOwnerCanonicalId()); + owner.setDisplayName(""); + policy.setOwner(owner); + policy.setResultCode(200); + + SAclDao aclDao = new SAclDao(); + List grants = aclDao.listGrants( "SObjectItem", item.getId()); + policy.setGrants(S3Grant.toGrants(grants)); + return policy; + } + + /** + * Handle requests for GET object and HEAD "get object extended" + * Called from S3ObjectAction for GET and HEAD of an object. 
+ */ + + public S3GetObjectResponse handleRequest(S3GetObjectRequest request) + { + S3GetObjectResponse response = new S3GetObjectResponse(); + S3PolicyContext context = null; + boolean ifRange = false; + long bytesStart = request.getByteRangeStart(); + long bytesEnd = request.getByteRangeEnd(); + int resultCode = 200; + + // [A] Verify that the bucket and the object exist + SBucketDao bucketDao = new SBucketDao(); + String bucketName = request.getBucketName(); + SBucket sbucket = bucketDao.getByName(bucketName); + if (sbucket == null) { + response.setResultCode(404); + response.setResultDescription("Bucket " + request.getBucketName() + " does not exist"); + return response; + } + + SObjectDao objectDao = new SObjectDao(); + String nameKey = request.getKey(); + SObject sobject = objectDao.getByNameKey( sbucket, nameKey ); + if (sobject == null) { + response.setResultCode(404); + response.setResultDescription("Object " + request.getKey() + " does not exist in bucket " + request.getBucketName()); + return response; + } + + String deletionMark = sobject.getDeletionMark(); + if (null != deletionMark) { + response.setDeleteMarker( deletionMark ); + response.setResultCode(404); + response.setResultDescription("Object " + request.getKey() + " has been deleted (1)"); + return response; + } + + + // [B] Versioning allow the client to ask for a specific version not just the latest + SObjectItem item = null; + int versioningStatus = sbucket.getVersioningStatus(); + String wantVersion = request.getVersion(); + if ( SBucket.VERSIONING_ENABLED == versioningStatus && null != wantVersion) + item = sobject.getVersion( wantVersion ); + else item = sobject.getLatestVersion(( SBucket.VERSIONING_ENABLED != versioningStatus )); + + if (item == null) { + response.setResultCode(404); + response.setResultDescription("Object " + request.getKey() + " has been deleted (2)"); + return response; + } + + if ( SBucket.VERSIONING_ENABLED == versioningStatus ) { + context = new S3PolicyContext( 
PolicyActions.GetObjectVersion, bucketName ); + context.setEvalParam( ConditionKeys.VersionId, wantVersion ); + } + else context = new S3PolicyContext( PolicyActions.GetObject, bucketName ); + context.setKeyName( nameKey ); + verifyAccess( context, "SObjectItem", item.getId(), SAcl.PERMISSION_READ ); + + + // [C] Handle all the IFModifiedSince ... conditions, and access privileges + // -> http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.27 (HTTP If-Range header) + if (request.isReturnCompleteObjectOnConditionFailure() && (0 <= bytesStart && 0 <= bytesEnd)) ifRange = true; + + resultCode = conditionPassed( request.getConditions(), item.getLastModifiedTime(), item.getMd5(), ifRange ); + if ( -1 == resultCode ) { + // -> If-Range implementation, we have to return the entire object + resultCode = 200; + bytesStart = -1; + bytesEnd = -1; + } + else if (200 != resultCode) { + response.setResultCode( resultCode ); + response.setResultDescription( "Precondition Failed" ); + return response; + } + + + // [D] Return the contents of the object inline + // -> extract the meta data that corresponds the specific versioned item + SMetaDao metaDao = new SMetaDao(); + List itemMetaData = metaDao.getByTarget( "SObjectItem", item.getId()); + if (null != itemMetaData) + { + int i = 0; + S3MetaDataEntry[] metaEntries = new S3MetaDataEntry[ itemMetaData.size() ]; + ListIterator it = itemMetaData.listIterator(); + while( it.hasNext()) { + SMeta oneTag = (SMeta)it.next(); + S3MetaDataEntry oneEntry = new S3MetaDataEntry(); + oneEntry.setName( oneTag.getName()); + oneEntry.setValue( oneTag.getValue()); + metaEntries[i++] = oneEntry; + } + response.setMetaEntries( metaEntries ); + } + + // -> support a single byte range + if ( 0 <= bytesStart && 0 <= bytesEnd ) { + response.setContentLength( bytesEnd - bytesStart ); + resultCode = 206; + } + else response.setContentLength( item.getStoredSize()); + + if(request.isReturnData()) + { + response.setETag(item.getMd5()); + 
response.setLastModified(DateHelper.toCalendar( item.getLastModifiedTime())); + response.setVersion( item.getVersion()); + if (request.isInlineData()) + { + OrderedPair tupleSHostInfo = getBucketStorageHost(sbucket); + S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(tupleSHostInfo.getFirst()); + + if ( 0 <= bytesStart && 0 <= bytesEnd ) + response.setData(bucketAdapter.loadObjectRange(tupleSHostInfo.getSecond(), + request.getBucketName(), item.getStoredPath(), bytesStart, bytesEnd )); + else response.setData(bucketAdapter.loadObject(tupleSHostInfo.getSecond(), request.getBucketName(), item.getStoredPath())); + } + } + + response.setResultCode( resultCode ); + response.setResultDescription("OK"); + return response; + } + + /** + * Handle object deletion requests, both versioning and non-versioning requirements. + * Called from S3ObjectAction for deletion. + */ + public S3Response handleRequest(S3DeleteObjectRequest request) + { + // Verify that the bucket and object exist + S3Response response = new S3Response(); + SBucketDao bucketDao = new SBucketDao(); + String bucketName = request.getBucketName(); + SBucket sbucket = bucketDao.getByName( bucketName ); + if (sbucket == null) { + response.setResultCode(404); + response.setResultDescription("Bucket " + bucketName + " does not exist"); + return response; + } + + SObjectDao objectDao = new SObjectDao(); + String nameKey = request.getKey(); + SObject sobject = objectDao.getByNameKey( sbucket, nameKey ); + if (sobject == null) { + response.setResultCode(404); + response.setResultDescription("No object with key " + nameKey + " exists in bucket " + bucketName); + return response; + } + + + // Discover whether versioning is enabled. If so versioning requires the setting of a deletion marker. 
+ String storedPath = null; + SObjectItem item = null; + int versioningStatus = sbucket.getVersioningStatus(); + if ( SBucket.VERSIONING_ENABLED == versioningStatus ) + { + String wantVersion = request.getVersion(); + S3PolicyContext context = new S3PolicyContext( PolicyActions.DeleteObjectVersion, bucketName ); + context.setKeyName( nameKey ); + context.setEvalParam( ConditionKeys.VersionId, wantVersion ); + verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_WRITE ); + + if (null == wantVersion) { + // If versioning is on and no versionId is given then we just write a deletion marker + sobject.setDeletionMark( UUID.randomUUID().toString()); + objectDao.update( sobject ); + } + else { + // Otherwise remove the deletion marker if this has been set + String deletionMarker = sobject.getDeletionMark(); + if (null != deletionMarker && wantVersion.equalsIgnoreCase( deletionMarker )) { + sobject.setDeletionMark( null ); + objectDao.update( sobject ); + response.setResultCode(204); + return response; + } + + // If versioning is on and the versionId is given (non-null) then delete the object matching that version + if ( null == (item = sobject.getVersion( wantVersion ))) { + response.setResultCode(404); + return response; + } + else { + // Providing versionId is non-null, then just delete the one item that matches the versionId from the database + storedPath = item.getStoredPath(); + sobject.deleteItem( item.getId()); + objectDao.update( sobject ); + } + } + } + else + { // If versioning is off then we do delete the null object + S3PolicyContext context = new S3PolicyContext( PolicyActions.DeleteObject, bucketName ); + context.setKeyName( nameKey ); + verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_WRITE ); + + if ( null == (item = sobject.getLatestVersion( true ))) { + response.setResultCode(404); + return response; + } + else { + // If there is no item with a null version then we are done + if (null == item.getVersion()) { + // 
The latest item carries a null version id, so remove the entire object
+                // Cascade-deleting can delete related SObject/SObjectItem objects, but not SAcl and SMeta objects.
+                storedPath = item.getStoredPath();
+                deleteMetaData( item.getId());
+                deleteObjectAcls( "SObjectItem", item.getId());
+                objectDao.delete( sobject );
+            }
+        }
+    }
+
+    // Delete the file holding the object
+    if (null != storedPath)
+    {
+        OrderedPair host_storagelocation_pair = getBucketStorageHost( sbucket );
+        S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter( host_storagelocation_pair.getFirst());
+        bucketAdapter.deleteObject( host_storagelocation_pair.getSecond(), bucketName, storedPath );
+    }
+
+    response.setResultCode(204);
+    return response;
+    }
+
+
+    /** Delete every SMeta row attached to the given SObjectItem (meta is not cascade-deleted). */
+    private void deleteMetaData( long itemId ) {
+        SMetaDao metaDao = new SMetaDao();
+        List itemMetaData = metaDao.getByTarget( "SObjectItem", itemId );
+        if (null != itemMetaData)
+        {
+            ListIterator it = itemMetaData.listIterator();
+            while( it.hasNext()) {
+                SMeta oneTag = (SMeta)it.next();
+                metaDao.delete( oneTag );
+            }
+        }
+    }
+
+    /** Delete every SAcl grant held against the given target/id pair (ACLs are not cascade-deleted). */
+    private void deleteObjectAcls( String target, long itemId ) {
+        SAclDao aclDao = new SAclDao();
+        List itemAclData = aclDao.listGrants( target, itemId );
+        if (null != itemAclData)
+        {
+            ListIterator it = itemAclData.listIterator();
+            while( it.hasNext()) {
+                SAcl oneTag = (SAcl)it.next();
+                aclDao.delete( oneTag );
+            }
+        }
+    }
+
+    /** Delete every SAcl grant held against the given bucket id. */
+    private void deleteBucketAcls( long bucketId ) {
+        SAclDao aclDao = new SAclDao();
+        List bucketAclData = aclDao.listGrants( "SBucket", bucketId );
+        if (null != bucketAclData)
+        {
+            ListIterator it = bucketAclData.listIterator();
+            while( it.hasNext()) {
+                SAcl oneTag = (SAcl)it.next();
+                aclDao.delete( oneTag );
+            }
+        }
+    }
+
+    /** Build the CommonPrefixes entries for a bucket listing; presumably mirrors S3's prefix/delimiter roll-up — TODO confirm against caller. */
+    private S3ListBucketPrefixEntry[] composeListBucketPrefixEntries(List l, String prefix, String delimiter, int maxKeys)
+    {
+        List entries = new ArrayList();
+        int count = 0;
+
+        for(SObject sobject : l)
+        {
+            if(delimiter != null && !delimiter.isEmpty())
+            {
+                String subName =
StringHelper.substringInBetween(sobject.getNameKey(), prefix, delimiter);
+                if(subName != null)
+                {
+                    S3ListBucketPrefixEntry entry = new S3ListBucketPrefixEntry();
+                    if ( prefix != null && prefix.length() > 0)
+                         entry.setPrefix(prefix + delimiter + subName);
+                    else entry.setPrefix(subName);
+                    // Fix: the entry was constructed but never collected, so 'entries' stayed
+                    // empty and this method always returned null (no CommonPrefixes reported).
+                    entries.add(entry);
+                }
+            }
+            count++;
+            if(count >= maxKeys) break;
+        }
+
+        if(entries.size() > 0) return entries.toArray(new S3ListBucketPrefixEntry[0]);
+        return null;
+    }
+
+    /**
+     * The 'versionIdMarker' parameter only makes sense if enableVersion is true.
+     * versionIdMarker is the starting point to return information back. So for example if an
+     * object has versions 1,2,3,4,5 and the versionIdMarker is '3', then 3,4,5 will be returned
+     * by this function. If the versionIdMarker is null then all versions are returned.
+     *
+     * TODO - how does the versionIdMarker work when there is a deletion marker in the object?
+     */
+    private S3ListBucketObjectEntry[] composeListBucketContentEntries(List l, String prefix, String delimiter, int maxKeys, boolean enableVersion, String versionIdMarker)
+    {
+        List entries = new ArrayList();
+        SObjectItem latest = null;
+        boolean hitIdMarker = false;
+        int count = 0;
+
+        for( SObject sobject : l )
+        {
+            if (delimiter != null && !delimiter.isEmpty())
+            {
+                if (StringHelper.substringInBetween(sobject.getNameKey(), prefix, delimiter) != null)
+                    continue;
+            }
+
+            if (enableVersion)
+            {
+                hitIdMarker = (null == versionIdMarker ?
true : false); + + // This supports GET REST calls with /?versions + String deletionMarker = sobject.getDeletionMark(); + if ( null != deletionMarker ) + { + // TODO we should also save the timestamp when something is deleted + S3ListBucketObjectEntry entry = new S3ListBucketObjectEntry(); + entry.setKey(sobject.getNameKey()); + entry.setVersion( deletionMarker ); + entry.setIsLatest( true ); + entry.setIsDeletionMarker( true ); + entry.setLastModified( Calendar.getInstance( TimeZone.getTimeZone("GMT") )); + entry.setOwnerCanonicalId(sobject.getOwnerCanonicalId()); + entry.setOwnerDisplayName(""); + entries.add( entry ); + latest = null; + } + else latest = sobject.getLatestVersion( false ); + + Iterator it = sobject.getItems().iterator(); + while( it.hasNext()) + { + SObjectItem item = (SObjectItem)it.next(); + + if ( !hitIdMarker ) + { + if (item.getVersion().equalsIgnoreCase( versionIdMarker )) { + hitIdMarker = true; + entries.add( toListEntry( sobject, item, latest )); + } + } + else entries.add( toListEntry( sobject, item, latest )); + } + } + else + { // -> if there are multiple versions of an object then just return its last version + Iterator it = sobject.getItems().iterator(); + SObjectItem lastestItem = null; + int maxVersion = 0; + int version = 0; + while(it.hasNext()) + { + SObjectItem item = (SObjectItem)it.next(); + String versionStr = item.getVersion(); + + if ( null != versionStr ) + version = Integer.parseInt(item.getVersion()); + else lastestItem = item; + + // -> if the bucket has versions turned on + if (version > maxVersion) { + maxVersion = version; + lastestItem = item; + } + } + if (lastestItem != null) { + entries.add( toListEntry( sobject, lastestItem, null )); + } + } + + count++; + if(count >= maxKeys) break; + } + + if ( entries.size() > 0 ) + return entries.toArray(new S3ListBucketObjectEntry[0]); + else return null; + } + + private static S3ListBucketObjectEntry toListEntry( SObject sobject, SObjectItem item, SObjectItem latest ) + 
{ + S3ListBucketObjectEntry entry = new S3ListBucketObjectEntry(); + entry.setKey(sobject.getNameKey()); + entry.setVersion( item.getVersion()); + entry.setETag( "\"" + item.getMd5() + "\"" ); + entry.setSize(item.getStoredSize()); + entry.setStorageClass( "STANDARD" ); + entry.setLastModified(DateHelper.toCalendar(item.getLastModifiedTime())); + entry.setOwnerCanonicalId(sobject.getOwnerCanonicalId()); + entry.setOwnerDisplayName(""); + + if (null != latest && item == latest) entry.setIsLatest( true ); + return entry; + } + + private OrderedPair getBucketStorageHost(SBucket bucket) + { + MHostMountDao mountDao = new MHostMountDao(); + + SHost shost = bucket.getShost(); + if(shost.getHostType() == SHost.STORAGE_HOST_TYPE_LOCAL) { + return new OrderedPair(shost, shost.getExportRoot()); + } + + MHostMount mount = mountDao.getHostMount(ServiceProvider.getInstance().getManagementHostId(), shost.getId()); + if(mount != null) { + return new OrderedPair(shost, mount.getMountPath()); + } + + // need to redirect request to other node + throw new HostNotMountedException("Storage host " + shost.getHost() + " is not locally mounted"); + } + + /** + * Locate the folder to hold upload parts at the same mount point as the upload's final bucket + * location. Create the upload folder dynamically. + * + * @param bucketName + */ + private void createUploadFolder(String bucketName) + { + if (PersistContext.acquireNamedLock("bucket.creation", LOCK_ACQUIRING_TIMEOUT_SECONDS)) + { + try { + allocBucketStorageHost(bucketName, ServiceProvider.getInstance().getMultipartDir()); + } + finally { + PersistContext.releaseNamedLock("bucket.creation"); + } + } + } + + /** + * The overrideName is used to create a hidden storage bucket (folder) in the same location + * as the given bucketName. This can be used to create a folder for parts of a multipart + * upload for the associated bucket. 
+ * + * @param bucketName + * @param overrideName + * @return + */ + private OrderedPair allocBucketStorageHost(String bucketName, String overrideName) + { + MHostDao mhostDao = new MHostDao(); + SHostDao shostDao = new SHostDao(); + + MHost mhost = mhostDao.get(ServiceProvider.getInstance().getManagementHostId()); + if(mhost == null) + throw new OutOfServiceException("Temporarily out of service"); + + if(mhost.getMounts().size() > 0) { + Random random = new Random(); + MHostMount[] mounts = (MHostMount[])mhost.getMounts().toArray(); + MHostMount mount = mounts[random.nextInt(mounts.length)]; + S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(mount.getShost()); + bucketAdapter.createContainer(mount.getMountPath(), (null != overrideName ? overrideName : bucketName)); + return new OrderedPair(mount.getShost(), mount.getMountPath()); + } + + // To make things simple, only allow one local mounted storage root TODO - Change in the future + String localStorageRoot = ServiceProvider.getInstance().getStartupProperties().getProperty("storage.root"); + if(localStorageRoot != null) { + SHost localSHost = shostDao.getLocalStorageHost(mhost.getId(), localStorageRoot); + if(localSHost == null) + throw new InternalErrorException("storage.root is configured but not initialized"); + + S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(localSHost); + bucketAdapter.createContainer(localSHost.getExportRoot(),(null != overrideName ? 
overrideName : bucketName)); + return new OrderedPair(localSHost, localStorageRoot); + } + + throw new OutOfStorageException("No storage host is available"); + } + + public S3BucketAdapter getStorageHostBucketAdapter(SHost shost) + { + S3BucketAdapter adapter = bucketAdapters.get(shost.getHostType()); + if(adapter == null) + throw new InternalErrorException("Bucket adapter is not installed for host type: " + shost.getHostType()); + + return adapter; + } + + /** + * If acl is set then the cannedAccessPolicy parameter should be null and is ignored. + * The cannedAccessPolicy parameter is for REST Put requests only where a simple set of ACLs can be + * created with a single header value. Note that we do not currently support "anonymous" un-authenticated + * access in our implementation. + * + * @throws IOException + */ + @SuppressWarnings("deprecation") + public OrderedPair allocObjectItem(SBucket bucket, String nameKey, S3MetaDataEntry[] meta, S3AccessControlList acl, String cannedAccessPolicy) + { + SObjectDao objectDao = new SObjectDao(); + SObjectItemDao objectItemDao = new SObjectItemDao(); + SMetaDao metaDao = new SMetaDao(); + SAclDao aclDao = new SAclDao(); + SObjectItem item = null; + int versionSeq = 1; + int versioningStatus = bucket.getVersioningStatus(); + + Session session = PersistContext.getSession(); + + // [A] To write into a bucket the user must have write permission to that bucket + S3PolicyContext context = new S3PolicyContext( PolicyActions.PutObject, bucket.getName()); + context.setKeyName( nameKey ); + context.setEvalParam( ConditionKeys.Acl, cannedAccessPolicy); + + verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE ); // TODO - check this validates plain POSTs + + // [B] If versioning is off them we over write a null object item + SObject object = objectDao.getByNameKey(bucket, nameKey); + if ( object != null ) + { + // -> if versioning is on create new object items + if ( SBucket.VERSIONING_ENABLED == versioningStatus ) 
+ { + session.lock(object, LockMode.UPGRADE); + versionSeq = object.getNextSequence(); + object.setNextSequence(versionSeq + 1); + session.save(object); + + item = new SObjectItem(); + item.setTheObject(object); + object.getItems().add(item); + item.setVersion(String.valueOf(versionSeq)); + Date ts = DateHelper.currentGMTTime(); + item.setCreateTime(ts); + item.setLastAccessTime(ts); + item.setLastModifiedTime(ts); + session.save(item); + } + else + { // -> find an object item with a null version, can be null + // if bucket started out with versioning enabled and was then suspended + item = objectItemDao.getByObjectIdNullVersion( object.getId()); + if (item == null) + { + item = new SObjectItem(); + item.setTheObject(object); + object.getItems().add(item); + Date ts = DateHelper.currentGMTTime(); + item.setCreateTime(ts); + item.setLastAccessTime(ts); + item.setLastModifiedTime(ts); + session.save(item); + } + } + } + else + { // -> there is no object nor an object item + object = new SObject(); + object.setBucket(bucket); + object.setNameKey(nameKey); + object.setNextSequence(2); + object.setCreateTime(DateHelper.currentGMTTime()); + object.setOwnerCanonicalId(UserContext.current().getCanonicalUserId()); + session.save(object); + + item = new SObjectItem(); + item.setTheObject(object); + object.getItems().add(item); + if (SBucket.VERSIONING_ENABLED == versioningStatus) item.setVersion(String.valueOf(versionSeq)); + Date ts = DateHelper.currentGMTTime(); + item.setCreateTime(ts); + item.setLastAccessTime(ts); + item.setLastModifiedTime(ts); + session.save(item); + } + + + // [C] We will use the item DB id as the file name, MD5/contentLength will be stored later + String suffix = null; + int dotPos = nameKey.lastIndexOf('.'); + if (dotPos >= 0) suffix = nameKey.substring(dotPos); + if ( suffix != null ) + item.setStoredPath(String.valueOf(item.getId()) + suffix); + else item.setStoredPath(String.valueOf(item.getId())); + + metaDao.save("SObjectItem", item.getId(), 
meta); + + + // [D] Are we setting an ACL along with the object + // -> the ACL is ALWAYS set on a particular instance of the object (i.e., a version) + if ( null != cannedAccessPolicy ) + { + setCannedAccessControls( cannedAccessPolicy, "SObjectItem", item.getId(), bucket ); + } + else if (null == acl || 0 == acl.size()) + { + // -> this is termed the "private" or default ACL, "Owner gets FULL_CONTROL" + setSingleAcl( "SObjectItem", item.getId(), SAcl.PERMISSION_FULL ); + } + else if (null != acl) { + aclDao.save( "SObjectItem", item.getId(), acl ); + } + + session.update(item); + return new OrderedPair(object, item); + } + + + /** + * Access controls that are specified via the "x-amz-acl:" headers in REST requests. + * Note that canned policies can be set when the object's contents are set + */ + public void setCannedAccessControls( String cannedAccessPolicy, String target, long objectId, SBucket bucket ) + { + // Find the permission and symbol for the principal corresponding to the requested cannedAccessPolicy + Triple permission_permission_symbol_triple = + SAcl.getCannedAccessControls(cannedAccessPolicy, target, bucket.getOwnerCanonicalId()); + if ( null == permission_permission_symbol_triple.getThird() ) + setSingleAcl(target, objectId, permission_permission_symbol_triple.getFirst()); + else + { setDefaultAcls( target, + objectId, + permission_permission_symbol_triple.getFirst(), // permission according to ownership of object + permission_permission_symbol_triple.getSecond(), // permission according to ownership of bucket + permission_permission_symbol_triple.getThird() ); // "symbol" to indicate principal or otherwise name of owner + + } + } + + + private void setSingleAcl( String target, long targetId, int permission ) + { + SAclDao aclDao = new SAclDao(); + S3AccessControlList defaultAcl = new S3AccessControlList(); + + // -> if an annoymous request, then do not rewrite the ACL + String userId = UserContext.current().getCanonicalUserId(); + if (0 < 
userId.length()) + { + S3Grant defaultGrant = new S3Grant(); + defaultGrant.setGrantee(SAcl.GRANTEE_USER); + defaultGrant.setCanonicalUserID( userId ); + defaultGrant.setPermission( permission ); + defaultAcl.addGrant( defaultGrant ); + aclDao.save( target, targetId, defaultAcl ); + } + } + + + /** + * The Cloud Stack API Access key is used for for the Canonical User Id everywhere (buckets and objects). + * + * @param owner - this can be the Cloud Access Key for a bucket owner or one of the + * following special symbols: + * (a) '*' - any principal authenticated user (i.e., any user with a registered Cloud Access Key) + * (b) 'A' - any anonymous principal (i.e., S3 request without an Authorization header) + */ + private void setDefaultAcls( String target, long objectId, int permission1, int permission2, String owner ) + { + SAclDao aclDao = new SAclDao(); + S3AccessControlList defaultAcl = new S3AccessControlList(); + + // -> object owner + S3Grant defaultGrant = new S3Grant(); + defaultGrant.setGrantee(SAcl.GRANTEE_USER); + defaultGrant.setCanonicalUserID( UserContext.current().getCanonicalUserId()); + defaultGrant.setPermission( permission1 ); + defaultAcl.addGrant( defaultGrant ); + + // -> bucket owner + defaultGrant = new S3Grant(); + defaultGrant.setGrantee(SAcl.GRANTEE_USER); + defaultGrant.setCanonicalUserID( owner ); + defaultGrant.setPermission( permission2 ); + defaultAcl.addGrant( defaultGrant ); + aclDao.save( target, objectId, defaultAcl ); + } + + public static PolicyAccess verifyPolicy( S3PolicyContext context ) + { + S3BucketPolicy policy = null; + + // Ordinarily a REST request will pass in an S3PolicyContext for a given bucket by this stage. The HttpServletRequest object + // should be held in the UserContext ready for extraction of the S3BucketPolicy. + // If there is an error in obtaining the request object or in loading the policy then log the failure and return a S3PolicyContext + // which indicates DEFAULT_DENY. 
Where there is no failure, the policy returned should be specific to the Canonical User ID of the requester. + + try { + // -> in SOAP the HttpServletRequest object is hidden and not passed around + if (null != context) { + context.setHttp( UserContext.current().getHttp()); + policy = loadPolicy( context ); + } + + if ( null != policy ) + return policy.eval(context, UserContext.current().getCanonicalUserId()); + else return PolicyAccess.DEFAULT_DENY; + } + catch( Exception e ) { + logger.error("verifyAccess - loadPolicy failed, bucket: " + context.getBucketName() + " policy ignored", e); + return PolicyAccess.DEFAULT_DENY; + } + } + + /** + * To determine access to a bucket or an object in a bucket evaluate first a define + * bucket policy and then any defined ACLs. + * + * @param context - all data needed for bucket policies + * @param target - used for ACL evaluation, object identifier + * @param targetId - used for ACL evaluation + * @param requestedPermission - ACL type access requested + * + * @throws ParseException, SQLException, ClassNotFoundException, IllegalAccessException, InstantiationException + */ + public static void verifyAccess( S3PolicyContext context, String target, long targetId, int requestedPermission ) + { + switch( verifyPolicy( context ) ) { + case ALLOW: // overrides ACLs (?) + return; + + case DENY: + throw new PermissionDeniedException( "Access Denied - bucket policy DENY result" ); + + case DEFAULT_DENY: + default: + accessAllowed( target, targetId, requestedPermission ); + break; + } + } + + /** + * This method verifies that the accessing client has the requested + * permission on the object/bucket/Acl represented by the tuple: + * + * For cases where an ACL is meant for any authenticated user we place a "*" for the + * Canonical User Id. N.B. - "*" is not a legal Cloud (Bridge) Access key. + * + * For cases where an ACL is meant for any anonymous user (or 'AllUsers') we place a "A" for the + * Canonical User Id. N.B. 
- "A" is not a legal Cloud (Bridge) Access key. + */ + public static void accessAllowed( String target, long targetId, int requestedPermission ) + { + if (SAcl.PERMISSION_PASS == requestedPermission) return; + + SAclDao aclDao = new SAclDao(); + + // If an annoymous request, then canonicalUserId is an empty string + String userId = UserContext.current().getCanonicalUserId(); + if ( 0 == userId.length()) + { + // Is an anonymous principal ACL set for this ? + if (hasPermission( aclDao.listGrants( target, targetId, "A" ), requestedPermission )) return; + } + else + { + if (hasPermission( aclDao.listGrants( target, targetId, userId ), requestedPermission )) return; + // Or alternatively is there is any principal authenticated ACL set for this ? + if (hasPermission( aclDao.listGrants( target, targetId, "*" ), requestedPermission )) return; + } + // No privileges implies that no access is allowed in the case of an anonymous user + throw new PermissionDeniedException( "Access Denied - ACLs do not give user the required permission" ); + } + + /** + * This method assumes that the bucket has been tested to make sure it exists before + * it is called. + * + * @param context + * @return S3BucketPolicy + * @throws SQLException, ClassNotFoundException, IllegalAccessException, InstantiationException, ParseException + */ + public static S3BucketPolicy loadPolicy( S3PolicyContext context ) + throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException, ParseException + { + OrderedPair result = ServiceProvider.getInstance().getBucketPolicy( context.getBucketName()); + S3BucketPolicy policy = result.getFirst(); + if ( null == policy ) + { + // -> do we have to load it from the database (any other value means there is no policy)? 
+ if (-1 == result.getSecond().intValue()) + { + BucketPolicyDao policyDao = new BucketPolicyDao(); + String policyInJson = policyDao.getPolicy( context.getBucketName()); + // -> place in cache that no policy exists in the database + if (null == policyInJson) { + ServiceProvider.getInstance().setBucketPolicy(context.getBucketName(), null); + return null; + } + + PolicyParser parser = new PolicyParser(); + policy = parser.parse( policyInJson, context.getBucketName()); + if (null != policy) + ServiceProvider.getInstance().setBucketPolicy(context.getBucketName(), policy); + } + } + return policy; + } + + public static void verifyBucketName( String bucketName, boolean useDNSGuidelines ) throws InvalidBucketName + { + // [A] To comply with Amazon S3 basic requirements, bucket names must meet the following conditions + // -> must be between 3 and 255 characters long + int size = bucketName.length(); + if (3 > size || size > 255) + throw new InvalidBucketName( bucketName + " is not between 3 and 255 characters long" ); + + // -> must start with a number or letter + if (!Character.isLetterOrDigit( bucketName.charAt( 0 ))) + throw new InvalidBucketName( bucketName + " does not start with a number or letter" ); + + // -> can contain lowercase letters, numbers, periods (.), underscores (_), and dashes (-) + // -> the bucket name can also contain uppercase letters but it is not recommended + for( int i=0; i < bucketName.length(); i++ ) + { + char next = bucketName.charAt(i); + if (Character.isLetter( next )) continue; + else if (Character.isDigit( next )) continue; + else if ('.' == next) continue; + else if ('_' == next) continue; + else if ('-' == next) continue; + else throw new InvalidBucketName( bucketName + " contains the invalid character: " + next ); + } + + // -> must not be formatted as an IP address (e.g., 192.168.5.4) + String[] parts = bucketName.split( "\\." 
); + if (4 == parts.length) + { + try { + int first = Integer.parseInt( parts[0] ); + int second = Integer.parseInt( parts[1] ); + int third = Integer.parseInt( parts[2] ); + int fourth = Integer.parseInt( parts[3] ); + throw new InvalidBucketName( bucketName + " is formatted as an IP address" ); + } + catch( NumberFormatException e ) + {throw new InvalidBucketName( bucketName);} + } + + + // [B] To conform with DNS requirements, Amazon recommends following these additional guidelines when creating buckets + // -> bucket names should be between 3 and 63 characters long + if (useDNSGuidelines) + { + // -> bucket names should be between 3 and 63 characters long + if (3 > size || size > 63) + throw new InvalidBucketName( "DNS requiremens, bucket name: " + bucketName + " is not between 3 and 63 characters long" ); + + // -> bucket names should not contain underscores (_) + int pos = bucketName.indexOf( '_' ); + if (-1 != pos) + throw new InvalidBucketName( "DNS requiremens, bucket name: " + bucketName + " should not contain underscores" ); + + // -> bucket names should not end with a dash + if (bucketName.endsWith( "-" )) + throw new InvalidBucketName( "DNS requiremens, bucket name: " + bucketName + " should not end with a dash" ); + + // -> bucket names cannot contain two, adjacent periods + pos = bucketName.indexOf( ".." ); + if (-1 != pos) + throw new InvalidBucketName( "DNS requiremens, bucket name: " + bucketName + " should not contain \"..\"" ); + + // -> bucket names cannot contain dashes next to periods (e.g., "my-.bucket.com" and "my.-bucket" are invalid) + if (-1 != bucketName.indexOf( "-." 
) || -1 != bucketName.indexOf( ".-" )) + throw new InvalidBucketName( "DNS requiremens, bucket name: " + bucketName + " should not contain \".-\" or \"-.\"" ); + } + } + + private static boolean hasPermission( List privileges, int requestedPermission ) + { + ListIterator it = privileges.listIterator(); + while( it.hasNext()) + { + // True providing the requested permission is contained in one or the granted rights for this user. False otherwise. + SAcl rights = (SAcl)it.next(); + int permission = rights.getPermission(); + if (requestedPermission == (permission & requestedPermission)) return true; + } + return false; + } + + /** + * ifRange is true and ifUnmodifiedSince or IfMatch fails then we return the entire object (indicated by + * returning a -1 as the function result. + * + * @param ifCond - conditional get defined by these tests + * @param lastModified - value used on ifModifiedSince or ifUnmodifiedSince + * @param ETag - value used on ifMatch and ifNoneMatch + * @param ifRange - using an if-Range HTTP functionality + * @return -1 means return the entire object with an HTTP 200 (not a subrange) + */ + private int conditionPassed( S3ConditionalHeaders ifCond, Date lastModified, String ETag, boolean ifRange ) + { + if (null == ifCond) return 200; + + if (0 > ifCond.ifModifiedSince( lastModified )) + return 304; + + if (0 > ifCond.ifUnmodifiedSince( lastModified )) + return (ifRange ? -1 : 412); + + if (0 > ifCond.ifMatchEtag( ETag )) + return (ifRange ? -1 : 412); + + if (0 > ifCond.ifNoneMatchEtag( ETag )) + return 412; + + return 200; + } +} \ No newline at end of file diff --git a/awsapi/src/com/cloud/bridge/service/core/s3/S3Grant.java b/awsapi/src/com/cloud/bridge/service/core/s3/S3Grant.java index ee14c659cff..e775a2c33dd 100644 --- a/awsapi/src/com/cloud/bridge/service/core/s3/S3Grant.java +++ b/awsapi/src/com/cloud/bridge/service/core/s3/S3Grant.java @@ -1,96 +1,84 @@ -/* - * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.cloud.bridge.service.core.s3; - -import java.util.List; - -import com.cloud.bridge.model.SAcl; -<<<<<<< HEAD - -/** - * @author Kelven Yang -======= -import com.cloud.bridge.model.SBucket; -import com.cloud.bridge.service.exception.UnsupportedException; - -/** - * @author Kelven Yang, John Zucker - * Each relation holds - * a grantee - which is one of SAcl.GRANTEE_USER, SAcl.GRANTEE_ALLUSERS, SAcl.GRANTEE_AUTHENTICATED - * a permission - which is one of SAcl.PERMISSION_PASS, SAcl.PERMISSION_NONE, SAcl.PERMISSION_READ, - * SAcl.PERMISSION_WRITE, SAcl.PERMISSION_READ_ACL, SAcl.PERMISSION_WRITE_ACL, SAcl.PERMISSION_FULL - * canonicalUserID ->>>>>>> 6472e7b... Now really adding the renamed files! 
- */ -public class S3Grant { - private int grantee; // SAcl.GRANTEE_USER etc - private int permission; // SAcl.PERMISSION_READ etc - private String canonicalUserID; - - public S3Grant() { - } - - public int getGrantee() { - return grantee; - } - - public void setGrantee(int grantee) { - this.grantee = grantee; - } - - public int getPermission() { - return permission; - } - - public void setPermission(int permission) { - this.permission = permission; - } - - public String getCanonicalUserID() { - return canonicalUserID; - } - - public void setCanonicalUserID(String canonicalUserID) { - this.canonicalUserID = canonicalUserID; - } - -<<<<<<< HEAD -======= - /* Return an array of S3Grants holding the permissions of grantees by grantee type and their canonicalUserIds. - * Used by S3 engine to get ACL policy requests for buckets and objects. - */ ->>>>>>> 6472e7b... Now really adding the renamed files! - public static S3Grant[] toGrants(List grants) { - if(grants != null) - { - S3Grant[] entries = new S3Grant[grants.size()]; - int i = 0; - for(SAcl acl: grants) { - entries[i] = new S3Grant(); - entries[i].setGrantee(acl.getGranteeType()); - entries[i].setCanonicalUserID(acl.getGranteeCanonicalId()); - entries[i].setPermission(acl.getPermission()); - i++; - } - return entries; - } - return null; - } -<<<<<<< HEAD -======= - ->>>>>>> 6472e7b... Now really adding the renamed files! -} +/* + * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.cloud.bridge.service.core.s3; + +import java.util.List; + +import com.cloud.bridge.model.SAcl; +import com.cloud.bridge.model.SBucket; +import com.cloud.bridge.service.exception.UnsupportedException; + +/** + * @author Kelven Yang, John Zucker + * Each relation holds + * a grantee - which is one of SAcl.GRANTEE_USER, SAcl.GRANTEE_ALLUSERS, SAcl.GRANTEE_AUTHENTICATED + * a permission - which is one of SAcl.PERMISSION_PASS, SAcl.PERMISSION_NONE, SAcl.PERMISSION_READ, + * SAcl.PERMISSION_WRITE, SAcl.PERMISSION_READ_ACL, SAcl.PERMISSION_WRITE_ACL, SAcl.PERMISSION_FULL + * canonicalUserID + */ +public class S3Grant { + private int grantee; // SAcl.GRANTEE_USER etc + private int permission; // SAcl.PERMISSION_READ etc + private String canonicalUserID; + + public S3Grant() { + } + + public int getGrantee() { + return grantee; + } + + public void setGrantee(int grantee) { + this.grantee = grantee; + } + + public int getPermission() { + return permission; + } + + public void setPermission(int permission) { + this.permission = permission; + } + + public String getCanonicalUserID() { + return canonicalUserID; + } + + public void setCanonicalUserID(String canonicalUserID) { + this.canonicalUserID = canonicalUserID; + } + + /* Return an array of S3Grants holding the permissions of grantees by grantee type and their canonicalUserIds. + * Used by S3 engine to get ACL policy requests for buckets and objects. 
+ */ + public static S3Grant[] toGrants(List grants) { + if(grants != null) + { + S3Grant[] entries = new S3Grant[grants.size()]; + int i = 0; + for(SAcl acl: grants) { + entries[i] = new S3Grant(); + entries[i].setGrantee(acl.getGranteeType()); + entries[i].setCanonicalUserID(acl.getGranteeCanonicalId()); + entries[i].setPermission(acl.getPermission()); + i++; + } + return entries; + } + return null; + } + +} \ No newline at end of file diff --git a/awsapi/src/com/cloud/bridge/service/core/s3/S3ListAllMyBucketsEntry.java b/awsapi/src/com/cloud/bridge/service/core/s3/S3ListAllMyBucketsEntry.java index e11a7c146a4..81c32eb590b 100644 --- a/awsapi/src/com/cloud/bridge/service/core/s3/S3ListAllMyBucketsEntry.java +++ b/awsapi/src/com/cloud/bridge/service/core/s3/S3ListAllMyBucketsEntry.java @@ -1,66 +1,58 @@ -/* - * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.cloud.bridge.service.core.s3; - -import java.util.Calendar; -<<<<<<< HEAD -======= -import java.util.TimeZone; ->>>>>>> 6472e7b... Now really adding the renamed files! 
- -/** - * @author Kelven Yang - */ -public class S3ListAllMyBucketsEntry { - private String name; - private Calendar creationDate; - - public S3ListAllMyBucketsEntry() { - } - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - -<<<<<<< HEAD - public Calendar getCreationDate() { - return creationDate; -======= - public Calendar getCreationDate() { - - // cal.setTimeZone(TimeZone.getTimeZone("Z")); - // java.util.Date d = cal.getTime(); - - // java.util.Date d = creationDate.getTime(); - // com.cloud.bridge.util.ISO8601SimpleDateTimeFormat sdf = new com.cloud.bridge.util.ISO8601SimpleDateTimeFormat(); - // sdf.format(d); - // java.lang.StringBuffer b = com.cloud.bridge.util.ISO8601SimpleDateTimeFormat.format(d); return b; - - return creationDate; - - - ->>>>>>> 6472e7b... Now really adding the renamed files! - } - - public void setCreationDate(Calendar creationDate) { - this.creationDate = creationDate; - } -} +/* + * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.cloud.bridge.service.core.s3; + +import java.util.Calendar; +import java.util.TimeZone; + +/** + * @author Kelven Yang + */ +public class S3ListAllMyBucketsEntry { + private String name; + private Calendar creationDate; + + public S3ListAllMyBucketsEntry() { + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public Calendar getCreationDate() { + + // cal.setTimeZone(TimeZone.getTimeZone("Z")); + // java.util.Date d = cal.getTime(); + + // java.util.Date d = creationDate.getTime(); + // com.cloud.bridge.util.ISO8601SimpleDateTimeFormat sdf = new com.cloud.bridge.util.ISO8601SimpleDateTimeFormat(); + // sdf.format(d); + // java.lang.StringBuffer b = com.cloud.bridge.util.ISO8601SimpleDateTimeFormat.format(d); return b; + + return creationDate; + + + + } + + public void setCreationDate(Calendar creationDate) { + this.creationDate = creationDate; + } +} \ No newline at end of file diff --git a/awsapi/src/com/cloud/bridge/service/core/s3/S3PutObjectInlineRequest.java b/awsapi/src/com/cloud/bridge/service/core/s3/S3PutObjectInlineRequest.java index 12eedf9b881..065d58f68f2 100644 --- a/awsapi/src/com/cloud/bridge/service/core/s3/S3PutObjectInlineRequest.java +++ b/awsapi/src/com/cloud/bridge/service/core/s3/S3PutObjectInlineRequest.java @@ -1,119 +1,111 @@ -/* - * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.cloud.bridge.service.core.s3; - -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.io.InputStream; - -import javax.activation.DataHandler; - -/** -<<<<<<< HEAD - * @author Kelven Yang -======= - * @author Kelven Yang, John Zucker ->>>>>>> 6472e7b... Now really adding the renamed files! - */ -public class S3PutObjectInlineRequest extends S3Request { - protected String bucketName; - protected String key; - protected long contentLength; - protected S3MetaDataEntry[] metaEntries; - protected S3AccessControlList acl; -<<<<<<< HEAD - protected String cannedAccessPolicy; // -> REST only sets an acl with a simple keyword -======= - protected String cannedAccessPolicy; // Canned ACLs are public-read, public-read-write, private, authenticated-read or log-delivery-write ->>>>>>> 6472e7b... Now really adding the renamed files! - protected DataHandler data; - protected String dataAsString; - - public S3PutObjectInlineRequest() { - super(); - data = null; - } - - public String getBucketName() { - return bucketName; - } - - public void setBucketName(String bucketName) { - this.bucketName = bucketName; - } - - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - public long getContentLength() { - return contentLength; - } - - public void setContentLength(long contentLength) { - this.contentLength = contentLength; - } - - public S3MetaDataEntry[] getMetaEntries() { - return metaEntries; - } - - public void setMetaEntries(S3MetaDataEntry[] metaEntries) { - this.metaEntries = metaEntries; - } - - public S3AccessControlList getAcl() { - return acl; - } - - public void setAcl(S3AccessControlList acl) { - this.acl = acl; - } - - public String getCannedAccess() { - return cannedAccessPolicy; - } - - public void setCannedAccess(String cannedAccessPolicy) { - this.cannedAccessPolicy = cannedAccessPolicy; - } - - public DataHandler getData() { - return data; - } - - public void 
setData(DataHandler data) { - this.data = data; - } - - public void setDataAsString( String data ) { - this.dataAsString = data; - } - - public InputStream getDataInputStream() throws IOException - { - if ( null == data ) - { - ByteArrayInputStream bs = new ByteArrayInputStream( dataAsString.getBytes()); - return bs; - } - else return data.getInputStream(); - } -} +/* + * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.cloud.bridge.service.core.s3; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; + +import javax.activation.DataHandler; + +/** + * @author Kelven Yang, John Zucker + */ +public class S3PutObjectInlineRequest extends S3Request { + protected String bucketName; + protected String key; + protected long contentLength; + protected S3MetaDataEntry[] metaEntries; + protected S3AccessControlList acl; + protected String cannedAccessPolicy; // Canned ACLs are public-read, public-read-write, private, authenticated-read or log-delivery-write + protected DataHandler data; + protected String dataAsString; + + public S3PutObjectInlineRequest() { + super(); + data = null; + } + + public String getBucketName() { + return bucketName; + } + + public void setBucketName(String bucketName) { + this.bucketName = bucketName; + } + + public String getKey() { + return key; + } + + public void setKey(String key) { + this.key = key; + } + + public long getContentLength() { + return contentLength; + } + + public void setContentLength(long contentLength) { + this.contentLength = contentLength; + } + + public S3MetaDataEntry[] getMetaEntries() { + return metaEntries; + } + + public void setMetaEntries(S3MetaDataEntry[] metaEntries) { + this.metaEntries = metaEntries; + } + + public S3AccessControlList getAcl() { + return acl; + } + + public void setAcl(S3AccessControlList acl) { + this.acl = acl; + } + + public String getCannedAccess() { + return cannedAccessPolicy; + } + + public void setCannedAccess(String cannedAccessPolicy) { + this.cannedAccessPolicy = cannedAccessPolicy; + } + + public DataHandler getData() { + return data; + } + + public void setData(DataHandler data) { + this.data = data; + } + + public void setDataAsString( String data ) { + this.dataAsString = data; + } + + public InputStream getDataInputStream() throws IOException + { + if ( null == data ) + { + ByteArrayInputStream bs = new ByteArrayInputStream( 
dataAsString.getBytes()); + return bs; + } + else return data.getInputStream(); + } +} \ No newline at end of file diff --git a/awsapi/src/com/cloud/bridge/service/core/s3/S3PutObjectInlineResponse.java b/awsapi/src/com/cloud/bridge/service/core/s3/S3PutObjectInlineResponse.java index 9a88eccdd1d..b9241458420 100644 --- a/awsapi/src/com/cloud/bridge/service/core/s3/S3PutObjectInlineResponse.java +++ b/awsapi/src/com/cloud/bridge/service/core/s3/S3PutObjectInlineResponse.java @@ -1,74 +1,67 @@ -/* - * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.cloud.bridge.service.core.s3; - -import java.util.Calendar; - -/** -<<<<<<< HEAD - * @author Kelven Yang -======= - * @author Kelven Yang, John Zucker ->>>>>>> 6472e7b... Now really adding the renamed files! - */ -public class S3PutObjectInlineResponse extends S3Response { - protected String ETag; - protected Calendar lastModified; - protected String version; - protected int uploadId; - - public S3PutObjectInlineResponse() { - super(); - uploadId = -1; - } - -<<<<<<< HEAD -======= - // add ETag header computed as Base64 MD5 whenever object is uploaded or updated - // the Base64 is represented in lowercase ->>>>>>> 6472e7b... Now really adding the renamed files! 
- public String getETag() { - return ETag; - } - - public void setETag(String eTag) { - this.ETag = eTag; - } - - public Calendar getLastModified() { - return lastModified; - } - - public void setLastModified(Calendar lastModified) { - this.lastModified = lastModified; - } - - public String getVersion() { - return version; - } - - public void setVersion(String version) { - this.version = version; - } - - public int getUploadId() { - return uploadId; - } - - public void setUploadId(int uploadId) { - this.uploadId = uploadId; - } -} +/* + * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.cloud.bridge.service.core.s3; + +import java.util.Calendar; + +/** + * @author Kelven Yang, John Zucker + */ +public class S3PutObjectInlineResponse extends S3Response { + protected String ETag; + protected Calendar lastModified; + protected String version; + protected int uploadId; + + public S3PutObjectInlineResponse() { + super(); + uploadId = -1; + } + + // add ETag header computed as Base64 MD5 whenever object is uploaded or updated + // the Base64 is represented in lowercase + public String getETag() { + return ETag; + } + + public void setETag(String eTag) { + this.ETag = eTag; + } + + public Calendar getLastModified() { + return lastModified; + } + + public void setLastModified(Calendar lastModified) { + this.lastModified = lastModified; + } + + public String getVersion() { + return version; + } + + public void setVersion(String version) { + this.version = version; + } + + public int getUploadId() { + return uploadId; + } + + public void setUploadId(int uploadId) { + this.uploadId = uploadId; + } +} \ No newline at end of file diff --git a/awsapi/src/com/cloud/bridge/util/CloudSessionFactory.java b/awsapi/src/com/cloud/bridge/util/CloudSessionFactory.java index 8657836bef1..a689900ea3d 100644 --- a/awsapi/src/com/cloud/bridge/util/CloudSessionFactory.java +++ b/awsapi/src/com/cloud/bridge/util/CloudSessionFactory.java @@ -16,41 +16,30 @@ package com.cloud.bridge.util; import java.io.File; -<<<<<<< HEAD import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.IOException; import java.util.Properties; -======= ->>>>>>> 6472e7b... Now really adding the renamed files! import org.hibernate.Session; import org.hibernate.SessionFactory; import org.hibernate.cfg.Configuration; -<<<<<<< HEAD import org.jasypt.encryption.pbe.StandardPBEStringEncryptor; import org.jasypt.properties.EncryptableProperties; import org.apache.log4j.Logger; -======= ->>>>>>> 6472e7b... Now really adding the renamed files! 
- /** * @author Kelven Yang */ public class CloudSessionFactory { private static CloudSessionFactory instance; -<<<<<<< HEAD public static final Logger logger = Logger.getLogger(CloudSessionFactory.class); -======= ->>>>>>> 6472e7b... Now really adding the renamed files! private SessionFactory factory; private CloudSessionFactory() { Configuration cfg = new Configuration(); File file = ConfigurationHelper.findConfigurationFile("hibernate.cfg.xml"); -<<<<<<< HEAD File propertiesFile = ConfigurationHelper.findConfigurationFile("db.properties"); Properties dbProp = null; @@ -80,15 +69,10 @@ public class CloudSessionFactory { // -======= - - // ->>>>>>> 6472e7b... Now really adding the renamed files! // we are packaging hibernate mapping files along with the class files, // make sure class loader use the same class path when initializing hibernate mapping. // This is important when we are deploying and testing at different environment (Tomcat/JUnit test runner) // -<<<<<<< HEAD if(file != null && dbProp != null){ Thread.currentThread().setContextClassLoader(this.getClass().getClassLoader()); cfg.configure(file); @@ -109,10 +93,6 @@ public class CloudSessionFactory { logger.warn("Unable to open load db configuration"); throw new RuntimeException("nable to open load db configuration"); } -======= - Thread.currentThread().setContextClassLoader(this.getClass().getClassLoader()); - factory = cfg.configure(file).buildSessionFactory(); ->>>>>>> 6472e7b... Now really adding the renamed files! } public synchronized static CloudSessionFactory getInstance() { diff --git a/awsapi/src/com/cloud/bridge/util/HeaderParam.java b/awsapi/src/com/cloud/bridge/util/HeaderParam.java index d6d380f6881..b510aa518f8 100644 --- a/awsapi/src/com/cloud/bridge/util/HeaderParam.java +++ b/awsapi/src/com/cloud/bridge/util/HeaderParam.java @@ -5,23 +5,16 @@ public class HeaderParam { protected String name; protected String value; -<<<<<<< HEAD -======= - ->>>>>>> 6472e7b... 
Now really adding the renamed files! public HeaderParam() { name = null; value = null; } -<<<<<<< HEAD -======= public HeaderParam (String name, String value) { this.name = name; this.name = value; } ->>>>>>> 6472e7b... Now really adding the renamed files! public void setName( String name ) { this.name = name; } diff --git a/awsapi/src/com/cloud/bridge/util/RestAuth.java b/awsapi/src/com/cloud/bridge/util/RestAuth.java index 90d8e414bda..9fa88de80d4 100644 --- a/awsapi/src/com/cloud/bridge/util/RestAuth.java +++ b/awsapi/src/com/cloud/bridge/util/RestAuth.java @@ -1,4 +1,3 @@ -<<<<<<< HEAD /* * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. * @@ -358,407 +357,3 @@ public class RestAuth { return result.trim(); } } -======= -/* - * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.cloud.bridge.util; - -import java.security.InvalidKeyException; -import java.security.SignatureException; -import java.util.*; -import java.io.UnsupportedEncodingException; -import java.net.URLDecoder; - -import javax.crypto.Mac; -import javax.crypto.spec.SecretKeySpec; - -import org.apache.commons.codec.binary.Base64; -import org.apache.log4j.Logger; - - -/** - * This class expects that the caller pulls the required headers from the standard - * HTTPServeletRequest structure. 
This class is responsible for providing the - * RFC2104 calculation to ensure that the signature is valid for the signing string. - * The signing string is a representation of the request. - * Notes are given below on what values are expected. - * This class is used for the Authentication check for REST requests and Query String - * Authentication requests. - * - * @author Kelven Yang, John Zucker, Salvatore Orlando - */ - -public class RestAuth { - protected final static Logger logger = Logger.getLogger(RestAuth.class); - - // TreeMap: used when constructing the CanonicalizedAmzHeaders Element of the StringToSign - protected TreeMap AmazonHeaders = null; // not always present - protected String bucketName = null; // not always present - protected String queryString = null; // for CanonicalizedResource - only interested in a string starting with particular values - protected String uriPath = null; // only interested in the resource path - protected String date = null; // only if x-amz-date is not set - protected String contentType = null; // not always present - protected String contentMD5 = null; // not always present - protected boolean amzDateSet = false; - protected boolean useSubDomain = false; - - protected Set allowedQueryParams; - - public RestAuth() { - // these must be lexicographically sorted - AmazonHeaders = new TreeMap(); - allowedQueryParams = new HashSet() {{ - add("acl"); - add("lifecycle"); - add("location"); - add("logging"); - add("notification"); - add("partNumber"); - add("policy"); - add("requestPayment"); - add("torrent"); - add("uploadId"); - add("uploads"); - add("versionId"); - add("versioning"); - add("versions"); - add("website"); - }}; - } - - public RestAuth(boolean useSubDomain) { - //invoke the other constructor - this(); - this.useSubDomain = useSubDomain; - } - - public void setUseSubDomain(boolean value) { - useSubDomain = value; - } - - public boolean getUseSubDomain() { - return useSubDomain; - } - - /** - * This header is used 
iff the "x-amz-date:" header is not defined. - * Value is used in constructing the StringToSign for signature verification. - * - * @param date - the contents of the "Date:" header, skipping the 'Date:' preamble. - * OR pass in the value of the "Expires=" query string parameter passed in - * for "Query String Authentication". - */ - public void setDateHeader( String date ) { - if (this.amzDateSet) return; - if (null != date) date = date.trim(); - this.date = date; - } - - /** - * Value is used in constructing the StringToSign for signature verification. - * - * @param type - the contents of the "Content-Type:" header, skipping the 'Content-Type:' preamble. - */ - public void setContentTypeHeader( String type ) { - if (null != type) type = type.trim(); - this.contentType = type; - } - - - /** - * Value is used in constructing the StringToSign for signature verification. - * @param type - the contents of the "Content-MD5:" header, skipping the 'Content-MD5:' preamble. - */ - public void setContentMD5Header( String md5 ) { - if (null != md5) md5 = md5.trim(); - this.contentMD5 = md5; - } - - - /** - * The bucket name can be in the "Host:" header but it does not have to be. It can - * instead be in the uriPath as the first step in the path. - * - * Used as part of the CanonalizedResource element of the StringToSign. - * If we get "Host: static.johnsmith.net:8080", then the bucket name is "static.johnsmith.net" - * - * @param header - contents of the "Host:" header, skipping the 'Host:' preamble. - */ - public void setHostHeader( String header ) { - if (null == header) { - this.bucketName = null; - return; - } - - // -> is there a port on the name? - header = header.trim(); - int offset = header.indexOf( ":" ); - if (-1 != offset) header = header.substring( 0, offset ); - this.bucketName = header; - } - - - /** - * Used as part of the CanonalizedResource element of the StringToSign. 
- * CanonicalizedResource = [ "/" + Bucket ] + - * + [sub-resource] - * The list of sub-resources that must be included when constructing the CanonicalizedResource Element are: acl, lifecycle, location, - * logging, notification, partNumber, policy, requestPayment, torrent, uploadId, uploads, versionId, versioning, versions and website. - * (http://docs.amazonwebservices.com/AmazonS3/latest/dev/RESTAuthentication.html) - * @param query - results from calling "HttpServletRequest req.getQueryString()" - */ - public void setQueryString( String query ) { - if (null == query) { - this.queryString = null; - return; - } - - // Sub-resources (i.e.: query params) must be lex sorted - Set subResources = new TreeSet(); - - String [] queryParams = query.split("&"); - StringBuffer builtQuery= new StringBuffer(); - for (String queryParam:queryParams) { - // lookup parameter name - String paramName = queryParam.split("=")[0]; - if (allowedQueryParams.contains(paramName)) { - subResources.add(queryParam); - } - } - for (String subResource:subResources) { - builtQuery.append(subResource + "&"); - } - // If anything inside the string buffer, add a "?" at the beginning, - // and then remove the last '&' - if (builtQuery.length() > 0) { - builtQuery.insert(0, "?"); - builtQuery.deleteCharAt(builtQuery.length()-1); - } - this.queryString = builtQuery.toString(); - } - - - /** - * Used as part of the CanonalizedResource element of the StringToSign. - * Append the path part of the un-decoded HTTP Request-URI, up-to but not including the query string. - * - * @param path - - results from calling "HttpServletRequest req.getPathInfo()" - */ - public void addUriPath( String path ) { - if (null != path) path = path.trim(); - this.uriPath = path; - } - - - /** - * Pass in each complete Amazon header found in the HTTP request one at a time. - * Each Amazon header added will become part of the signature calculation. 
- * We are using a TreeMap here because of the S3 definition: - * "Sort the collection of headers lexicographically by header name." - * - * @param headerAndValue - needs to be the complete amazon header (i.e., starts with "x-amz"). - */ - public void addAmazonHeader( String headerAndValue ) { - if (null == headerAndValue) return; - - String canonicalized = null; - - // [A] First Canonicalize the header and its value - // -> we use the header 'name' as the key since we have to sort on that - int offset = headerAndValue.indexOf( ":" ); - String header = headerAndValue.substring( 0, offset+1 ).toLowerCase(); - String value = headerAndValue.substring( offset+1 ).trim(); - - // -> RFC 2616, Section 4.2: unfold the header's value by replacing linear white space with a single space character - // -> does the HTTPServeletReq already do this for us? - value = value.replaceAll( " ", " " ); // -> multiple spaces to one space - value = value.replaceAll( "(\r\n|\t|\n)", " " ); // -> CRLF, tab, and LF to one space - - - // [B] Does this header already exist? - if ( AmazonHeaders.containsKey( header )) { - // -> combine header fields with the same name into one "header-name:comma-separated-value-list" pair as prescribed by RFC 2616, section 4.2, without any white-space between values. - canonicalized = AmazonHeaders.get( header ); - canonicalized = new String( canonicalized + "," + value + "\n" ); - canonicalized = canonicalized.replaceAll( "\n,", "," ); // remove the '\n' from the first stored value - } - else canonicalized = new String( header + value + "\n" ); // -> as per spec, no space between header and its value - - AmazonHeaders.put( header, canonicalized ); - - // [C] "x-amz-date:" takes precedence over the "Date:" header - if (header.equals( "x-amz-date:" )) { - this.amzDateSet = true; - if (null != this.date) this.date = null; - } - } - - - /** - * The request is authenticated if we can regenerate the same signature given - * on the request. 
Before calling this function make sure to set the header values - * defined by the public values above. - * - * @param httpVerb - the type of HTTP request (e.g., GET, PUT) - * @param secretKey - value obtained from the AWSAccessKeyId - * @param signature - the signature we are trying to recreate, note can be URL-encoded - * - * @throws SignatureException - * - * @return true if request has been authenticated, false otherwise - * @throws UnsupportedEncodingException - */ - - public boolean verifySignature( String httpVerb, String secretKey, String signature ) - throws SignatureException, UnsupportedEncodingException { - - if (null == httpVerb || null == secretKey || null == signature) return false; - - httpVerb = httpVerb.trim(); - secretKey = secretKey.trim(); - signature = signature.trim(); - - // First calculate the StringToSign after the caller has initialized all the header values - String StringToSign = genStringToSign( httpVerb ); - String calSig = calculateRFC2104HMAC( StringToSign, secretKey ); - // Was the passed in signature URL encoded? (it must be base64 encoded) - int offset = signature.indexOf( "%" ); - if (-1 != offset) signature = URLDecoder.decode( signature, "UTF-8" ); - - boolean match = signature.equals( calSig ); - if (!match) - logger.error( "Signature mismatch, [" + signature + "] [" + calSig + "] over [" + StringToSign + "]" ); - - return match; - } - - - /** - * This function generates the single string that will be used to sign with a users - * secret key. - * - * StringToSign = HTTP-Verb + "\n" + - * Content-MD5 + "\n" + - * Content-Type + "\n" + - * Date + "\n" + - * CanonicalizedAmzHeaders + - * CanonicalizedResource; - * - * @return The single StringToSign or null. 
- */ - private String genStringToSign( String httpVerb ) { - StringBuffer canonicalized = new StringBuffer(); - String temp = null; - String canonicalizedResourceElement = genCanonicalizedResourceElement(); - canonicalized.append( httpVerb ).append( "\n" ); - if ( (null != this.contentMD5) ) - canonicalized.append( this.contentMD5 ); - canonicalized.append( "\n" ); - - if ( (null != this.contentType) ) - canonicalized.append( this.contentType ); - canonicalized.append( "\n" ); - - if (null != this.date) - canonicalized.append( this.date ); - - canonicalized.append( "\n" ); - - if (null != (temp = genCanonicalizedAmzHeadersElement())) canonicalized.append( temp ); - if (null != canonicalizedResourceElement) canonicalized.append( canonicalizedResourceElement ); - - if ( 0 == canonicalized.length()) - return null; - - return canonicalized.toString(); - } - - - /** - * CanonicalizedResource represents the Amazon S3 resource targeted by the request. - * CanonicalizedResource = [ "/" + Bucket ] + - * + - * [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; - * - * @return A single string representing CanonicalizedResource or null. - */ - private String genCanonicalizedResourceElement() { - StringBuffer canonicalized = new StringBuffer(); - - if(this.useSubDomain && this.bucketName != null) - canonicalized.append( "/" ).append( this.bucketName ); - - if (null != this.uriPath ) canonicalized.append( this.uriPath ); - if (null != this.queryString) canonicalized.append( this.queryString ); - - if ( 0 == canonicalized.length()) - return null; - - return canonicalized.toString(); - } - - - /** - * Construct the Canonicalized Amazon headers element of the StringToSign by - * concatenating all headers in the TreeMap into a single string. - * - * @return A single string with all the Amazon headers glued together, or null - * if no Amazon headers appeared in the request. 
- */ - private String genCanonicalizedAmzHeadersElement() { - Collection headers = AmazonHeaders.values(); - Iterator itr = headers.iterator(); - StringBuffer canonicalized = new StringBuffer(); - - while( itr.hasNext()) - canonicalized.append( itr.next()); - - if ( 0 == canonicalized.length()) - return null; - - return canonicalized.toString(); - } - - - /** - * Create a signature by the following method: - * new String( Base64( SHA1( key, byte array ))) - * - * @param signIt - the data to generate a keyed HMAC over - * @param secretKey - the user's unique key for the HMAC operation - * @return String - the recalculated string - * @throws SignatureException - */ - private String calculateRFC2104HMAC( String signIt, String secretKey ) - throws SignatureException { - String result = null; - try { - SecretKeySpec key = new SecretKeySpec( secretKey.getBytes(), "HmacSHA1" ); - Mac hmacSha1 = Mac.getInstance( "HmacSHA1" ); - hmacSha1.init( key ); - byte [] rawHmac = hmacSha1.doFinal( signIt.getBytes()); - result = new String( Base64.encodeBase64( rawHmac )); - } - catch( InvalidKeyException e ) { - throw new SignatureException( "Failed to generate keyed HMAC on REST request because key " + secretKey + " is invalid" + e.getMessage()); - } - catch (Exception e) { - throw new SignatureException( "Failed to generate keyed HMAC on REST request: " + e.getMessage()); - } - return result.trim(); - } -} ->>>>>>> 6472e7b... Now really adding the renamed files! 
diff --git a/awsapi/src/com/cloud/bridge/util/StringHelper.java b/awsapi/src/com/cloud/bridge/util/StringHelper.java index 1c185a899af..2220346b44d 100644 --- a/awsapi/src/com/cloud/bridge/util/StringHelper.java +++ b/awsapi/src/com/cloud/bridge/util/StringHelper.java @@ -19,26 +19,13 @@ import java.io.IOException; import java.io.InputStream; /** -<<<<<<< HEAD - * @author Kelven -======= * @author Kelven, John Zucker * Provide converters for regexp (case independent tokens) * Also provide upper case or lower case (default) converters for byte array b[] to hex String ->>>>>>> 6472e7b... Now really adding the renamed files! */ public class StringHelper { public static final String EMPTY_STRING = ""; -<<<<<<< HEAD - private static final char[] hexChars = { '0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F' }; - - public static String toHexString(byte[] b) { - StringBuffer sb = new StringBuffer(); - for (int i = 0; i < b.length; i++) { - sb.append(hexChars[ (int)(((int)b[i] >> 4) & 0x0f)]); - sb.append(hexChars[ (int)(((int)b[i]) & 0x0f)]); -======= private static final char[] hexCharsUpperCase = { '0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F' }; private static final char[] hexCharsLowerCase = { '0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f' }; @@ -61,7 +48,6 @@ public class StringHelper { for (int i = 0; i < b.length; i++) { sb.append(hexCharsLowerCase[ (int)(((int)b[i] >> 4) & 0x0f)]); sb.append(hexCharsLowerCase[ (int)(((int)b[i]) & 0x0f)]); ->>>>>>> 6472e7b... Now really adding the renamed files! 
} return sb.toString(); } diff --git a/awsapi/src/com/cloud/stack/CloudStackApi.java b/awsapi/src/com/cloud/stack/CloudStackApi.java index d0bf1f20314..73d261cae38 100644 --- a/awsapi/src/com/cloud/stack/CloudStackApi.java +++ b/awsapi/src/com/cloud/stack/CloudStackApi.java @@ -1194,11 +1194,7 @@ public class CloudStackApi { CloudStackCommand cmd = new CloudStackCommand(ApiConstants.DELETE_SNAPSHOT); if (cmd != null) cmd.setParam(ApiConstants.ID, id); -<<<<<<< HEAD return _client.call(cmd, apiKey, secretKey, true, ApiConstants.DELETE_SNAPSHOT_RESPONSE, null, CloudStackInfoResponse.class); -======= - return _client.call(cmd, apiKey, secretKey, true, ApiConstants.DELETE_SNAPSHOT_RESPONSE, ApiConstants.SNAPSHOT, CloudStackInfoResponse.class); ->>>>>>> 6472e7b... Now really adding the renamed files! } /** @@ -1239,11 +1235,7 @@ public class CloudStackApi { if (id != null) cmd.setParam(ApiConstants.ID, id); if (ids != null) cmd.setParam(ApiConstants.IDS, ids); } -<<<<<<< HEAD return _client.call(cmd, apiKey, secretKey, false, ApiConstants.DELETE_SNAPSHOT_POLICIES_RESPONSE, null, CloudStackInfoResponse.class); -======= - return _client.call(cmd, apiKey, secretKey, false, ApiConstants.DELETE_SNAPSHOT_POLICIES_RESPONSE, ApiConstants.SNAPSHOT, CloudStackInfoResponse.class); ->>>>>>> 6472e7b... Now really adding the renamed files! } /** @@ -1955,7 +1947,6 @@ public class CloudStackApi { * @param zoneId * @param account * @param domainId -<<<<<<< HEAD * @param isDefault * @param startIp * @param endIp @@ -1963,26 +1954,13 @@ public class CloudStackApi { * @param netmask * @param isShared * @param networkDomain -======= - * @param endIp - * @param gateway - * @param isDefault - * @param isShared - * @param netmask - * @param networkDomain - * @param startIp ->>>>>>> 6472e7b... Now really adding the renamed files! 
* @param tags * @param vlan * @return * @throws Exception */ public CloudStackNetwork createNetwork(String displayText, String name, String networkOfferingId, String zoneId, String account, String domainId, -<<<<<<< HEAD Boolean isDefault, String startIp, String endIp, String gateway, String netmask, Boolean isShared, String networkDomain, String tags, -======= - String endIp, String gateway, Boolean isDefault, Boolean isShared, String netmask, String networkDomain, String startIp, String tags, ->>>>>>> 6472e7b... Now really adding the renamed files! String vlan) throws Exception { CloudStackCommand cmd = new CloudStackCommand(ApiConstants.CREATE_NETWORK); if (cmd != null) { diff --git a/awsapi/src/com/cloud/stack/models/ApiConstants.java b/awsapi/src/com/cloud/stack/models/ApiConstants.java index 46e33de70de..e8e172e2bd3 100644 --- a/awsapi/src/com/cloud/stack/models/ApiConstants.java +++ b/awsapi/src/com/cloud/stack/models/ApiConstants.java @@ -283,11 +283,7 @@ public class ApiConstants { public static final String LIST_SNAPSHOT_POLICIES = "listSnapshotPolicies"; public static final String LIST_SNAPSHOT_POLICIES_RESPONSE = "listsnapshotpoliciesresponse"; public static final String LIST_SNAPSHOTS = "listSnapshots"; -<<<<<<< HEAD public static final String LIST_SNAPSHOTS_RESPONSE = "listsnapshotsresponse"; -======= - public static final String LIST_SNAPSHOTS_RESPONSE = "listsnapshotsresponsee"; ->>>>>>> 6472e7b... Now really adding the renamed files! 
public static final String LIST_SSH_KEY_PAIRS = "listSSHKeyPairs"; public static final String LIST_SSH_KEY_PAIRS_RESPONSE = "listsshkeypairsresponse"; public static final String LIST_TEMPLATE_PERMISSIONS = "listTemplatePermissions"; @@ -386,11 +382,7 @@ public class ApiConstants { public static final String RESTART_NETWORK = "restartNetwork"; public static final String RESTART_NETWORK_RESPONSE = "restartnetworkresponse"; public static final String REVOKE_SECURITY_GROUP_INGRESS = "revokeSecurityGroupIngress"; -<<<<<<< HEAD public static final String REVOKE_SECURITY_GROUP_INGRESS_RESPONSE = "revokesecuritygroupingress"; -======= - public static final String REVOKE_SECURITY_GROUP_INGRESS_RESPONSE = "revokesecuritygroupingressresponse"; ->>>>>>> 6472e7b... Now really adding the renamed files! public static final String ROOT_DEVICE_ID = "rootdeviceid"; public static final String ROOT_DEVICE_TYPE = "rootdevicetype"; public static final String RULE_ID = "ruleid"; diff --git a/awsapi/src/com/cloud/stack/models/CloudStackIngressRule.java b/awsapi/src/com/cloud/stack/models/CloudStackIngressRule.java index 4af4d0e3908..4f54114e8fc 100644 --- a/awsapi/src/com/cloud/stack/models/CloudStackIngressRule.java +++ b/awsapi/src/com/cloud/stack/models/CloudStackIngressRule.java @@ -20,11 +20,7 @@ import com.google.gson.annotations.SerializedName; public class CloudStackIngressRule { @SerializedName(ApiConstants.RULE_ID) -<<<<<<< HEAD private String ruleId; -======= - private Long ruleId; ->>>>>>> 6472e7b... Now really adding the renamed files! @SerializedName(ApiConstants.PROTOCOL) private String protocol; @@ -53,11 +49,7 @@ public class CloudStackIngressRule { public CloudStackIngressRule() { } -<<<<<<< HEAD public String getRuleId() { -======= - public Long getRuleId() { ->>>>>>> 6472e7b... Now really adding the renamed files! 
return ruleId; } diff --git a/awsapi/src/com/cloud/stack/models/CloudStackNic.java b/awsapi/src/com/cloud/stack/models/CloudStackNic.java index 219b21a4b0d..f6156eec2d9 100644 --- a/awsapi/src/com/cloud/stack/models/CloudStackNic.java +++ b/awsapi/src/com/cloud/stack/models/CloudStackNic.java @@ -20,11 +20,7 @@ import com.google.gson.annotations.SerializedName; public class CloudStackNic { @SerializedName(ApiConstants.ID) -<<<<<<< HEAD private String id; -======= - private Long id; ->>>>>>> 6472e7b... Now really adding the renamed files! @SerializedName(ApiConstants.BROADCAST_URI) private String broadcastUri; @@ -48,11 +44,7 @@ public class CloudStackNic { private String netmask; @SerializedName(ApiConstants.NETWORK_ID) -<<<<<<< HEAD private String networkid; -======= - private Long networkid; ->>>>>>> 6472e7b... Now really adding the renamed files! @SerializedName(ApiConstants.TRAFFIC_TYPE) private String trafficType; @@ -63,19 +55,11 @@ public class CloudStackNic { public CloudStackNic() { } -<<<<<<< HEAD public String getId() { return id; } public String getNetworkid() { -======= - public Long getId() { - return id; - } - - public Long getNetworkid() { ->>>>>>> 6472e7b... Now really adding the renamed files! return networkid; } diff --git a/awsapi/src/com/cloud/stack/models/CloudStackResourceLimit.java b/awsapi/src/com/cloud/stack/models/CloudStackResourceLimit.java index 6edaa969868..1f9bd677d6c 100644 --- a/awsapi/src/com/cloud/stack/models/CloudStackResourceLimit.java +++ b/awsapi/src/com/cloud/stack/models/CloudStackResourceLimit.java @@ -21,11 +21,7 @@ public class CloudStackResourceLimit { @SerializedName(ApiConstants.ACCOUNT) private String accountName; @SerializedName(ApiConstants.DOMAIN_ID) -<<<<<<< HEAD private String domainId; -======= - private Long domainId; ->>>>>>> 6472e7b... Now really adding the renamed files! 
@SerializedName(ApiConstants.DOMAIN) private String domainName; @SerializedName(ApiConstants.RESOURCE_TYPE) @@ -41,15 +37,9 @@ public class CloudStackResourceLimit { return accountName; } -<<<<<<< HEAD public String getDomainId() { return domainId; } -======= - public Long getDomainId() { - return domainId; - } ->>>>>>> 6472e7b... Now really adding the renamed files! public String getDomainName() { return domainName; diff --git a/awsapi/src/com/cloud/stack/models/CloudStackSecurityGroupIngress.java b/awsapi/src/com/cloud/stack/models/CloudStackSecurityGroupIngress.java index fdcc4686f4d..f4d4291637a 100644 --- a/awsapi/src/com/cloud/stack/models/CloudStackSecurityGroupIngress.java +++ b/awsapi/src/com/cloud/stack/models/CloudStackSecurityGroupIngress.java @@ -36,11 +36,7 @@ public class CloudStackSecurityGroupIngress { @SerializedName(ApiConstants.PROTOCOL) private String protocol; @SerializedName(ApiConstants.RULE_ID) -<<<<<<< HEAD private String ruleId; -======= - private Long ruleId; ->>>>>>> 6472e7b... Now really adding the renamed files! @SerializedName(ApiConstants.SECURITY_GROUP_NAME) private String securityGroupName; @SerializedName(ApiConstants.START_PORT) @@ -106,11 +102,7 @@ public class CloudStackSecurityGroupIngress { /** * @return the ruleId */ -<<<<<<< HEAD public String getRuleId() { -======= - public Long getRuleId() { ->>>>>>> 6472e7b... Now really adding the renamed files! 
return ruleId; } diff --git a/awsapi/src/com/cloud/stack/models/CloudStackServiceOffering.java b/awsapi/src/com/cloud/stack/models/CloudStackServiceOffering.java index 2cf0e34d3b0..9dde25a553a 100644 --- a/awsapi/src/com/cloud/stack/models/CloudStackServiceOffering.java +++ b/awsapi/src/com/cloud/stack/models/CloudStackServiceOffering.java @@ -23,13 +23,8 @@ import com.google.gson.annotations.SerializedName; * */ public class CloudStackServiceOffering { -<<<<<<< HEAD @SerializedName(ApiConstants.ID) private String id; -======= - @SerializedName(ApiConstants.ID) - private Long id; ->>>>>>> 6472e7b... Now really adding the renamed files! @SerializedName(ApiConstants.CPU_NUMBER) private Long cpuNumber; @SerializedName(ApiConstants.CPU_SPEED) @@ -43,11 +38,7 @@ public class CloudStackServiceOffering { @SerializedName(ApiConstants.DOMAIN) private String domain; @SerializedName(ApiConstants.DOMAIN_ID) -<<<<<<< HEAD private String domainId; -======= - private Long domainId; ->>>>>>> 6472e7b... Now really adding the renamed files! @SerializedName(ApiConstants.HOST_TAGS) private String hostTags; @SerializedName(ApiConstants.IS_SYSTEM) @@ -58,11 +49,7 @@ public class CloudStackServiceOffering { private Long memory; @SerializedName(ApiConstants.NAME) private String name; -<<<<<<< HEAD @SerializedName(ApiConstants.OFFER_HA) -======= - @SerializedName(ApiConstants.OFFER_HA) ->>>>>>> 6472e7b... Now really adding the renamed files! private Boolean offerHa; @SerializedName(ApiConstants.STORAGE_TYPE) private String storageType; @@ -81,7 +68,6 @@ public class CloudStackServiceOffering { /** * @return the id */ -<<<<<<< HEAD public String getId() { return id; } @@ -89,11 +75,6 @@ public class CloudStackServiceOffering { public void setId(String id) { this.id = id; } -======= - public Long getId() { - return id; - } ->>>>>>> 6472e7b... Now really adding the renamed files! 
 /** * @return the cpuNumber @@ -140,7 +121,6 @@ public class CloudStackServiceOffering { /** * @return the domainId */ -<<<<<<< HEAD public String getDomainId() { return domainId; } @@ -150,13 +130,6 @@ public class CloudStackServiceOffering { } /** -======= - public Long getDomainId() { - return domainId; - } - - /** ->>>>>>> 6472e7b... Now really adding the renamed files! * @return the hostTags */ public String getHostTags() { @@ -190,15 +163,11 @@ public class CloudStackServiceOffering { public String getName() { return name; } -<<<<<<< HEAD public void setName(String name) { this.name = name; } - -======= ->>>>>>> 6472e7b... Now really adding the renamed files! /** * @return the offerHa */ diff --git a/awsapi/test/com/cloud/gate/util/CloudStackClientTestCase.java b/awsapi/test/com/cloud/gate/util/CloudStackClientTestCase.java index d6cb9bc5525..8e3b18a6d1a 100644 --- a/awsapi/test/com/cloud/gate/util/CloudStackClientTestCase.java +++ b/awsapi/test/com/cloud/gate/util/CloudStackClientTestCase.java @@ -26,7 +26,7 @@ public class CloudStackClientTestCase extends BaseTestCase { command.setParam("id", "246446"); try { CloudStackUserVm vm = client.call(command, API_KEY, SECRET_KEY, true, "startvirtualmachineresponse", "virtualmachine", CloudStackUserVm.class); - Assert.assertTrue(vm.getId() == 246446); + Assert.assertTrue("246446".equals(vm.getId())); } catch(Exception e) { logger.error("Unexpected exception ", e); } diff --git a/build.xml b/build.xml index db3d6ebcce9..e9e4bef6145 100755 --- a/build.xml +++ b/build.xml @@ -27,5 +27,5 @@ - + diff --git a/build/build-cloud-bridge.properties b/build/build-cloud-bridge.properties deleted file mode 100644 index 95de98ac6a6..00000000000 --- a/build/build-cloud-bridge.properties +++ /dev/null @@ -1,12 +0,0 @@ -company.major.version=1 -company.minor.version=0 -company.patch.version=2.RC4 - -target.compat.version=1.6 -source.compat.version=1.6 - -debug=true -build.type=developer -debuglevel=lines,source,vars -deprecation=off - 
diff --git a/build/build-cloud-bridge.xml b/build/build-cloud-bridge.xml deleted file mode 100644 index 06aac1e027a..00000000000 --- a/build/build-cloud-bridge.xml +++ /dev/null @@ -1,433 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/build/developer.xml b/build/developer.xml index 27e3dc0547a..0d081006e5a 100755 --- a/build/developer.xml +++ b/build/developer.xml @@ -154,7 +154,7 @@ - + diff --git a/build/license/eula.en.html b/build/license/eula.en.html index c30161da2a1..fa74cdb45ce 100644 --- a/build/license/eula.en.html +++ b/build/license/eula.en.html @@ -1 +1,436 @@ -

CITRIX® LICENSE AGREEMENT

This is a legal agreement ("AGREEMENT") between you, the Licensed User, and Citrix Systems, Inc., Citrix Systems International GmbH, or Citrix Systems Asia Pacific Pty Ltd. Your location of receipt of this product or feature release (both hereinafter "PRODUCT") or technical support (hereinafter "SUPPORT") determines the providing entity hereunder (the applicable entity is hereinafter referred to as "CITRIX"). Citrix Systems, Inc., a Delaware corporation, licenses this PRODUCT in the Americas and Japan and provides SUPPORT in the Americas. Citrix Systems International GmbH, a Swiss company wholly owned by Citrix Systems, Inc., licenses this PRODUCT and provides SUPPORT in Europe, the Middle East, and Africa, and licenses the PRODUCT in Asia and the Pacific (excluding Japan). Citrix Systems Asia Pacific Pty Ltd. provides SUPPORT in Asia and the Pacific (excluding Japan). Citrix Systems Japan KK provides SUPPORT in Japan. BY INSTALLING AND/OR USING THE PRODUCT, YOU ARE AGREEING TO BE BOUND BY THE TERMS OF THIS AGREEMENT. IF YOU DO NOT AGREE TO THE TERMS OF THIS AGREEMENT, DO NOT INSTALL AND/OR USE THE PRODUCT.

1. GRANT OF LICENSE. The PRODUCT is the Citrix proprietary software program in object code form distributed hereunder. This PRODUCT is licensed under a CPU socket model. The PRODUCT is activated by licenses that allow use of the Software in increments defined by the license model ("Licenses").

Under the CPU socket model, a "CPU socket" is an individual CPU socket on a server running the PRODUCT, regardless of whether or not the socket contains a CPU.

Licenses for other CITRIX PRODUCTS or other editions of the same PRODUCT may not be used to increase the allowable use for the PRODUCT. Licenses are version specific for the PRODUCT. They must be the same version or later than the PRODUCT being accessed. CITRIX grants to you the following worldwide and non-exclusive rights to the PRODUCT and accompanying documentation (collectively called the "SOFTWARE"):

a. License. You may install the SOFTWARE on servers containing up to the number of CPU sockets for which you have purchased Licenses ("Production Servers"). In addition, you may install the management portion of the SOFTWARE on management servers as required to support the SOFTWARE running on the Production Servers. You may use the SOFTWARE to provide cloud services for internal users or third parties. Each License that is installed in both a production and disaster recovery environment may be used only in one of the environments at any one time, except for duplicate use during routine testing of the disaster recovery environment. You have the right to customize the SOFTWARE Web user interface only.

b. Perpetual License. If the SOFTWARE is "Perpetual License SOFTWARE," the SOFTWARE is licensed on a perpetual basis and includes the right to receive Subscription (as defined in Section 2 below).

c. Annual PRODUCT. If the SOFTWARE is "Annual License SOFTWARE," your license is for one (1) year and includes the right to receive Updates for that period (but not under Subscription as defined in Section 2 below). For the purposes of this AGREEMENT, an "Update" shall mean a generally available release of the same SOFTWARE. To extend an Annual License, you must purchase and install a new license prior to the expiration of the current License. Note that if a new License is not purchased and installed, Annual License SOFTWARE is not licensed for use beyond the expiration of the license period. Annual License SOFTWARE may disable itself upon expiration of the license period.

d. Partner Demo. If this SOFTWARE is labeled "Partner Demo," notwithstanding any term to the contrary in this AGREEMENT, your License permits use only if you are a current CITRIX authorized distributor or reseller and then only for demonstration, test, or evaluation purposes in support of your customers. Partner Demo SOFTWARE may not be used for customer training. Note that Partner Demo SOFTWARE disables itself on the "time-out" date identified on the SOFTWARE packaging.

e. Evaluation. If this SOFTWARE is labeled "Evaluation," notwithstanding any term to the contrary in this AGREEMENT, your License permits use only for your internal demonstration, test, or evaluation purposes. Note that Evaluation SOFTWARE disables itself on the "time-out" date identified on the SOFTWARE packaging.

f. Archive Copy. You may make one (1) copy of the SOFTWARE in machine-readable form solely for back-up purposes, provided that you reproduce all proprietary notices on the copy.

2. SUBSCRIPTION RIGHTS. Your initial subscription for Perpetual License SOFTWARE ("Subscription"), including SUPPORT, shall begin on the date the Licenses are delivered to you by email. Subscription shall continue for a one (1) year term subject to your purchase of annual renewals (the "Subscription Term"). During the initial or a renewal Subscription Term, CITRIX may, from time to time, generally make Updates available for licensing to the public. Upon general availability of Updates during the Subscription Term, CITRIX shall provide you with Updates for covered Licenses. Any such Updates so delivered to you shall be considered SOFTWARE under the terms of this AGREEMENT, except they are not covered by the Limited Warranty applicable to SOFTWARE, to the extent permitted by applicable law. Subscription Advantage may be purchased for the SOFTWARE until it is no longer offered in accordance with the CITRIX PRODUCT Support Lifecycle Policy posted at www.citrix.com.

You acknowledge that CITRIX may develop and market new or different computer programs or editions of the SOFTWARE that use portions of the SOFTWARE and that perform all or part of the functions performed by the SOFTWARE. Nothing contained in this AGREEMENT shall give you any rights with respect to such new or different computer programs or editions. You also acknowledge that CITRIX is not obligated under this AGREEMENT to make any Updates available to the public. Any deliveries of Updates shall be Ex Works CITRIX (Incoterms 2000).

3. SUPPORT. SUPPORT is sold including various combinations of Incidents, technical contacts, coverage hours, geographic coverage areas, technical relationship management coverage, and infrastructure assessment options. The offering you purchase determines your entitlement. An "Incident" is defined as a single SUPPORT issue and reasonable effort(s) needed to resolve it. An Incident may require multiple telephone calls and offline research to achieve final resolution. The Incident severity will determine the response levels for the SOFTWARE. Unused Incidents and other entitlements expire at the end of each annual term. SUPPORT may be purchased for the SOFTWARE until it is no longer offered in accordance with the CITRIX PRODUCT Support Lifecycle Policy posted at www.citrix.com. SUPPORT will be provided remotely from CITRIX to your locations. Where on-site visits are mutually agreed, you will be billed for reasonable travel and living expenses in accordance with your travel policy. CITRIX' performance is predicated upon the following responsibilities being fulfilled by you: (i) you will designate a Customer Support Manager ("CSM") who will be the primary administrative contact; (ii) you will designate Named Contacts (including a CSM), preferably each CITRIX certified, and each Named Contact (excluding CSM) will be supplied with an individual service ID number for contacting SUPPORT; (iii) you agree to perform reasonable problem determination activities and to perform reasonable problem resolution activities as suggested by CITRIX. 
You agree to cooperate with such requests; (iv) you are responsible for implementing procedures necessary to safeguard the integrity and security of SOFTWARE and data from unauthorized access and for reconstructing any lost or altered files resulting from catastrophic failures; (v) you are responsible for procuring, installing, and maintaining all equipment, telephone lines, communications interfaces, and other hardware at your site and providing CITRIX with access to your facilities as required to operate the SOFTWARE and permitting CITRIX to perform the service called for by this AGREEMENT; and (vi) you are required to implement all currently available and applicable hotfixes, hotfix rollup packs, and service packs or their equivalent to the SOFTWARE in a timely manner. CITRIX is not required to provide any SUPPORT relating to problems arising out of: (i) your or any third party's alterations or additions to the SOFTWARE, operating system or environment that adversely affects the SOFTWARE; (ii) Citrix provided alterations or additions to the SOFTWARE that do not address Errors or Defects; (iii) any functionality not defined in the PRODUCT documentation published by CITRIX and included with the PRODUCT; (iv) use of the SOFTWARE on a processor and peripherals other than the processor and peripherals defined in the documentation; (v) SOFTWARE that has reached End-of-Life; and (vi) any consulting deliverables from any party. An "Error" is defined as a failure in the SOFTWARE to materially conform to the functionality defined in the documentation. A "Defect" is defined as a failure in the SOFTWARE to conform to the specifications in the documentation. In situations where CITRIX cannot provide a satisfactory resolution to your critical problem through normal SUPPORT methods, CITRIX may engage its product development team to create a private fix. 
Private fixes are designed to address your specific situation and may not be distributed by you outside your organization without written consent from CITRIX. CITRIX retains all right, title, and interest in and to all private fixes. Any hotfixes or private fixes are not SOFTWARE under the terms of this AGREEMENT and they are not covered by the Limited Warranty or Infringement Indemnification applicable to SOFTWARE, to the extent permitted by applicable law. With respect to infrastructure assessments or other consulting services, all intellectual property rights in all reports, preexisting works and derivative works of such preexisting works, as well as installation scripts and other deliverables and developments made, conceived, created, discovered, invented, or reduced to practice in the performance of the assessment are and shall remain the sole and absolute property of CITRIX, subject to a worldwide, nonexclusive License to you for internal use.

4. DESCRIPTION OF OTHER RIGHTS, LIMITATIONS, AND OBLIGATIONS. You may not transfer, rent, timeshare, grant rights in or lease the SOFTWARE except to the extent such foregoing restriction is expressly prohibited by applicable law. If you purchased Licenses for the SOFTWARE to replace other CITRIX Licenses for other CITRIX SOFTWARE and such replacement is a condition of the transaction, you agree to destroy those other CITRIX Licenses and retain no copies after installation of the new Licenses and SOFTWARE. You shall provide the serial numbers of such replaced Licenses and corresponding replacement Licenses to the reseller, and upon request, directly to CITRIX for license tracking purposes. You may not modify, translate, reverse engineer, decompile, disassemble, create derivative works based on, or copy the SOFTWARE except as specifically licensed herein or to the extent such foregoing restriction is expressly prohibited by applicable law. You may not remove any proprietary notices, labels, or marks on any SOFTWARE. To the extent permitted by applicable law, you agree to allow CITRIX to audit your compliance with the terms of this AGREEMENT upon prior written notice during normal business hours. Notwithstanding the foregoing, this AGREEMENT shall not prevent or restrict you from exercising additional or different rights to any free, open source code, documentation and materials contained in or provided with the SOFTWARE in accordance with the applicable free or open source license for such code, documentation, and materials.

ALL RIGHTS IN THE SOFTWARE NOT EXPRESSLY GRANTED ARE RESERVED BY CITRIX OR ITS SUPPLIERS.

You hereby agree, that to the extent that any applicable mandatory laws (such as, for example, national laws implementing EC Directive 91/250 on the Legal Protection of Computer Programs) give you the right to perform any of the aforementioned activities without the consent of CITRIX to gain certain information about the SOFTWARE, before you exercise any such rights, you shall first request such information from CITRIX in writing detailing the purpose for which you need the information. Only if and after CITRIX, at its sole discretion, partly or completely denies your request, shall you exercise your statutory rights.

5. INFRINGEMENT INDEMNIFICATION. CITRIX shall indemnify and defend, or at its option, settle any claim, suit, or proceeding brought against you based on an allegation that the SOFTWARE (excluding Open Source Software) infringes upon any patent or copyright of any third party ("Infringement Claim"), provided you promptly notify CITRIX in writing of your notification or discovery of an Infringement Claim such that CITRIX is not prejudiced by any delay in such notification. For purposes of this Section 5, "Open Source Software" means software distributed by Citrix under an open source licensing model (e.g., the GNU General Public License, BSD or a license similar to those approved by the Open Source Initiative). CITRIX will have sole control over the defense or settlement of any Infringement Claim and you will provide reasonable assistance in the defense of the same. Following notice of an Infringement Claim, or if CITRIX believes such a claim is likely, CITRIX may at its sole expense and option: (i) procure for you the right to continue to use the alleged infringing SOFTWARE; (ii) replace or modify the SOFTWARE to make it non-infringing; or (iii) accept return of the SOFTWARE and provide you with a refund as appropriate. CITRIX assumes no liability for any Infringement Claims or allegations of infringement based on: (i) your use of any SOFTWARE after notice that you should cease use of such SOFTWARE due to an Infringement Claim; (ii) any modification of the SOFTWARE by you or at your direction; or (iii) your combination of SOFTWARE with non-CITRIX programs, data, hardware, or other materials, if such Infringement Claim would have been avoided by the use of the SOFTWARE alone. THE FOREGOING STATES YOUR EXCLUSIVE REMEDY WITH RESPECT TO ANY INFRINGEMENT CLAIM.

6. LIMITED WARRANTY AND DISCLAIMER. CITRIX warrants that for a period of ninety (90) days from the date of delivery of the SOFTWARE to you, the SOFTWARE will perform substantially in accordance with the PRODUCT documentation published by CITRIX and included with the PRODUCT. CITRIX and its suppliers' entire liability and your exclusive remedy under this warranty (which is subject to you returning the SOFTWARE to CITRIX or an authorized reseller) will be, at the sole option of CITRIX and subject to applicable law, to replace the media and/or SOFTWARE or to refund the purchase price and terminate this AGREEMENT. This limited warranty does not cover any modification of the SOFTWARE by you or related issues. CITRIX will provide the SUPPORT requested by you in a professional and workmanlike manner, but CITRIX cannot guarantee that every question or problem raised by you will be resolved or resolved in a certain amount of time. With respect to consulting services, CITRIX and its suppliers' entire liability and your exclusive remedy under this warranty is re-performance of the services.

TO THE EXTENT PERMITTED BY APPLICABLE LAW AND EXCEPT FOR THE ABOVE LIMITED WARRANTY FOR SOFTWARE, CITRIX AND ITS SUPPLIERS MAKE AND YOU RECEIVE NO WARRANTIES OR CONDITIONS, EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE; AND CITRIX AND ITS SUPPLIERS SPECIFICALLY DISCLAIM WITH RESPECT TO SOFTWARE, UPDATES, SUBSCRIPTION ADVANTAGE, AND SUPPORT ANY CONDITIONS OF QUALITY, AVAILABILITY, RELIABILITY, SECURITY, LACK OF VIRUSES, BUGS, OR ERRORS, AND ANY IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF TITLE, QUIET ENJOYMENT, QUIET POSSESSION, MERCHANTABILITY, NONINFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE IS NOT DESIGNED, MANUFACTURED, OR INTENDED FOR USE OR DISTRIBUTION WITH ANY EQUIPMENT THE FAILURE OF WHICH COULD LEAD DIRECTLY TO DEATH, PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE. YOU ASSUME THE RESPONSIBILITY FOR THE SELECTION OF THE SOFTWARE AND HARDWARE TO ACHIEVE YOUR INTENDED RESULTS, AND FOR THE INSTALLATION OF, USE OF, AND RESULTS OBTAINED FROM THE SOFTWARE AND HARDWARE.

7. PROPRIETARY RIGHTS. No title to or ownership of the SOFTWARE is transferred to you. CITRIX and/or its licensors own and retain all title and ownership of all intellectual property rights in and to the SOFTWARE, including any adaptations, modifications, translations, derivative works or copies. You acquire only a limited License to use the SOFTWARE.

8. EXPORT RESTRICTION. You agree that you will not export, re-export, or import the SOFTWARE in any form without the appropriate government licenses. You understand that under no circumstances may the SOFTWARE be exported to any country subject to U.S. embargo or to U.S.-designated denied persons or prohibited entities or U.S. specially designated nationals.

9. LIMITATION OF LIABILITY. TO THE EXTENT PERMITTED BY APPLICABLE LAW, YOU AGREE THAT NEITHER CITRIX NOR ITS AFFILIATES, SUPPLIERS, OR AUTHORIZED DISTRIBUTORS SHALL BE LIABLE FOR ANY LOSS OF DATA OR PRIVACY, LOSS OF INCOME, LOSS OF OPPORTUNITY OR PROFITS, COST OF RECOVERY, LOSS ARISING FROM YOUR USE OF THE SOFTWARE OR SUPPORT, OR DAMAGE ARISING FROM YOUR USE OF THIRD PARTY SOFTWARE OR HARDWARE OR ANY OTHER SPECIAL, INCIDENTAL, CONSEQUENTIAL, OR INDIRECT DAMAGES ARISING OUT OF OR IN CONNECTION WITH THIS AGREEMENT; OR THE USE OF THE SOFTWARE OR SUPPORT, REFERENCE MATERIALS, OR ACCOMPANYING DOCUMENTATION; OR YOUR EXPORTATION, REEXPORTATION, OR IMPORTATION OF THE SOFTWARE, HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY. THIS LIMITATION WILL APPLY EVEN IF CITRIX, ITS AFFILIATES, SUPPLIERS, OR AUTHORIZED DISTRIBUTORS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. TO THE EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT SHALL THE LIABILITY OF CITRIX, ITS AFFILIATES, SUPPLIERS, OR AUTHORIZED DISTRIBUTORS EXCEED THE AMOUNT PAID FOR THE SOFTWARE, SUBSCRIPTION (INCLUDING SUBSCRIPTION WITH SUPPORT) OR SUPPORT AT ISSUE. YOU ACKNOWLEDGE THAT THE LICENSE OR SUPPORT FEE REFLECTS THIS ALLOCATION OF RISK. SOME JURISDICTIONS DO NOT ALLOW THE LIMITATION OR EXCLUSION OF LIABILITY FOR INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THE ABOVE LIMITATION OR EXCLUSION MAY NOT APPLY TO YOU. For purposes of this AGREEMENT, the term "CITRIX AFFILIATE" shall mean any legal entity fifty percent (50%) or more of the voting interests in which are owned directly or indirectly by Citrix Systems, Inc. Affiliates, suppliers, and authorized distributors are intended to be third party beneficiaries of this AGREEMENT.

10. TERMINATION. This AGREEMENT is effective until terminated. You may terminate this AGREEMENT at any time by removing the SOFTWARE from your computers and destroying all copies and providing written notice to CITRIX with the serial numbers of the terminated licenses. CITRIX may terminate this AGREEMENT at any time for your breach of this AGREEMENT. Unauthorized copying of the SOFTWARE or the accompanying documentation or otherwise failing to comply with the license grant of this AGREEMENT will result in automatic termination of this AGREEMENT and will make available to CITRIX all other legal remedies. You agree and acknowledge that your material breach of this AGREEMENT shall cause CITRIX irreparable harm for which monetary damages alone would be inadequate and that, to the extent permitted by applicable law, CITRIX shall be entitled to injunctive or equitable relief without the need for posting a bond. Upon termination of this AGREEMENT, the License granted herein will terminate and you must immediately destroy the SOFTWARE and accompanying documentation, and all backup copies thereof.

11. U.S. GOVERNMENT END-USERS. If you are a U.S. Government agency, in accordance with Section 12.212 of the Federal Acquisition Regulation (48 CFR 12.212 (October 1995)) and Sections 227.7202-1 and 227.7202-3 of the Defense Federal Acquisition Regulation Supplement (48 CFR 227.7202-1, 227.7202-3 (June 1995)), you hereby acknowledge that the SOFTWARE constitutes "Commercial Computer Software" and that the use, duplication, and disclosure of the SOFTWARE by the U.S. Government or any of its agencies is governed by, and is subject to, all of the terms, conditions, restrictions, and limitations set forth in this standard commercial license AGREEMENT. In the event that, for any reason, Sections 12.212, 227.7202-1 or 227.7202-3 are deemed not applicable, you hereby acknowledge that the Government's right to use, duplicate, or disclose the SOFTWARE are "Restricted Rights" as defined in 48 CFR Section 52.227-19(c)(1) and (2) (June 1987), or DFARS 252.227-7014(a)(14) (June 1995), as applicable. Manufacturer is Citrix Systems, Inc., 851 West Cypress Creek Road, Fort Lauderdale, Florida, 33309.

12. AUTHORIZED DISTRIBUTORS AND RESELLERS. CITRIX authorized distributors and resellers do not have the right to make modifications to this AGREEMENT or to make any additional representations, commitments, or warranties binding on CITRIX.

13. CHOICE OF LAW AND VENUE. If provider is Citrix Systems, Inc., this AGREEMENT will be governed by the laws of the State of Florida without reference to conflict of laws principles and excluding the United Nations Convention on Contracts for the International Sale of Goods, and in any dispute arising out of this AGREEMENT, you consent to the exclusive personal jurisdiction and venue in the State and Federal courts within Broward County, Florida. If provider is Citrix Systems International GmbH, this AGREEMENT will be governed by the laws of Switzerland without reference to the conflict of laws principles, and excluding the United Nations Convention on Contracts for the International Sale of Goods, and in any dispute arising out of this AGREEMENT, you consent to the exclusive personal jurisdiction and venue of the competent courts in the Canton of Zurich. If provider is Citrix Systems Asia Pacific Pty Ltd, this AGREEMENT will be governed by the laws of the State of New South Wales, Australia and excluding the United Nations Convention on Contracts for the International Sale of Goods, and in any dispute arising out of this AGREEMENT, you consent to the exclusive personal jurisdiction and venue of the competent courts sitting in the State of New South Wales. If any provision of this AGREEMENT is invalid or unenforceable under applicable law, it shall be to that extent deemed omitted and the remaining provisions will continue in full force and effect. To the extent a provision is deemed omitted, the parties agree to comply with the remaining terms of this AGREEMENT in a manner consistent with the original intent of the AGREEMENT.

14. HOW TO CONTACT CITRIX. Should you have any questions concerning this AGREEMENT or want to contact CITRIX for any reason, write to CITRIX at the following address: Citrix Systems, Inc., Customer Service, 851 West Cypress Creek Road, Ft. Lauderdale, Florida 33309; Citrix Systems International GmbH, Rheinweg 9, CH-8200 Schaffhausen, Switzerland; or Citrix Systems Asia Pacific Pty Ltd., Level 3, 1 Julius Ave., Riverside Corporate Park, North Ryde NSW 2113, Sydney, Australia.

15. TRADEMARKS. Citrix is a trademark and/or registered trademark of Citrix Systems, Inc., in the U.S. and other countries.

+ + +

CITRIX® LICENSE AGREEMENT

+ +

This is a legal + agreement ("AGREEMENT") between you, the Licensed User, and Citrix + Systems, Inc., Citrix Systems International GmbH, or Citrix Systems Asia + Pacific Pty Ltd. Your location of receipt of this product or feature release + (both hereinafter "PRODUCT") or technical support (hereinafter + "SUPPORT") determines the providing entity hereunder (the applicable + entity is hereinafter referred to as "CITRIX"). Citrix Systems, Inc., + a Delaware corporation, licenses this PRODUCT in the Americas and Japan and + provides SUPPORT in the Americas. Citrix Systems International GmbH, a Swiss + company wholly owned by Citrix Systems, Inc., licenses this PRODUCT and + provides SUPPORT in Europe, the Middle East, and Africa, and licenses the + PRODUCT in Asia and the Pacific (excluding Japan). Citrix Systems Asia Pacific + Pty Ltd. provides SUPPORT in Asia and the Pacific (excluding Japan). Citrix + Systems Japan KK provides SUPPORT in Japan. BY INSTALLING AND/OR USING THE + PRODUCT, YOU ARE AGREEING TO BE BOUND BY THE TERMS OF THIS AGREEMENT. IF YOU DO + NOT AGREE TO THE TERMS OF THIS AGREEMENT, DO NOT INSTALL AND/OR USE THE + PRODUCT.

+ +

1. GRANT OF LICENSE. The + PRODUCT is the Citrix proprietary software program in object code form + distributed hereunder. This PRODUCT is licensed under a CPU socket model. The + PRODUCT is activated by licenses that allow use of the Software in increments + defined by the license model ("Licenses").

+ +

Under the CPU socket + model, a "CPU socket" is an individual CPU socket on a server running + the PRODUCT, regardless of whether or not the socket contains a CPU.

+ +

Licenses for other + CITRIX PRODUCTS or other editions of the same PRODUCT may not be used to + increase the allowable use for the PRODUCT. Licenses are version specific for + the PRODUCT. They must be the same version or later than the PRODUCT being + accessed. CITRIX grants to you the following worldwide and non-exclusive rights + to the PRODUCT and accompanying documentation (collectively called the + "SOFTWARE"):

+ +

a. + License. + You may install the SOFTWARE on servers containing up to the number of CPU + sockets for which you have purchased Licenses ("Production Servers"). + In addition, you may install the management portion of the SOFTWARE on + management servers as required to support the SOFTWARE running on the + Production Servers. You may use the SOFTWARE to provide cloud services for + internal users or third parties. Each License that is installed in both a + production and disaster recovery environment may be used only in one of the + environments at any one time, except for duplicate use during routine testing + of the disaster recovery environment. You have the right to customize the + SOFTWARE Web user interface only.

+ +

b. + Perpetual + License. If the SOFTWARE is "Perpetual License SOFTWARE," the + SOFTWARE is licensed on a perpetual basis.

+ +

c. + Annual + PRODUCT. If the SOFTWARE is "Annual License SOFTWARE," your license + is for one (1) year. To extend an Annual License, you must purchase and install + a new license prior to the expiration of the current License. Note that if a + new License is not purchased and installed, Annual License SOFTWARE is not + licensed for use beyond the expiration of the license period. Annual License + SOFTWARE may disable itself upon expiration of the license period.

+ +

d. + Partner + Demo. If this SOFTWARE is labeled "Partner Demo," notwithstanding any + term to the contrary in this AGREEMENT, your License permits use only if you + are a current CITRIX authorized distributor or reseller and then only for demonstration, + test, or evaluation purposes in support of your customers. Partner Demo + SOFTWARE may not be used for customer training. Note that Partner Demo SOFTWARE + disables itself on the "time-out" date identified on the SOFTWARE + packaging.

+ +

e. + Evaluation. + If this SOFTWARE is labeled "Evaluation," notwithstanding any term to + the contrary in this AGREEMENT, your License permits use only for your internal + demonstration, test, or evaluation purposes. Note that Evaluation SOFTWARE + disables itself on the "time-out" date identified on the SOFTWARE + packaging.

+ +

f.  + Archive + Copy. You may make one (1) copy of the SOFTWARE in machine-readable form solely + for back-up purposes, provided that you reproduce all proprietary notices on + the copy.

+ +

2. MAINTENANCE. You must + purchase maintenance ("Maintenance") with new licenses for either + Perpetual or

+ +

Annual License SOFTWARE. + Your initial Maintenance shall begin on the date the Licenses are delivered to + you by email. Maintenance shall continue for a one (1) year term subject to + your purchase of annual renewals (the "Maintenance Term"). During the + initial or a renewal Maintenance Term, CITRIX will provide SUPPORT and Updates. + For the purposes of this AGREEMENT, an "Update" shall mean a + generally available release of the same SOFTWARE which Citrix makes available + from time to time. Updates shall be considered SOFTWARE under the terms of this + AGREEMENT, except they are not covered by the Limited Warranty applicable to + SOFTWARE, to the extent permitted by applicable law. Maintenance may be + purchased for the SOFTWARE until it is no longer offered in accordance with the + CITRIX PRODUCT Support Lifecycle Policy posted at www.citrix.com. + You acknowledge that CITRIX may develop and market new or different + computer programs or editions of the SOFTWARE that use portions of the SOFTWARE + and that perform all or part of the functions performed by the SOFTWARE. + Nothing contained in this AGREEMENT shall give you any rights with respect to + such new or different computer programs or editions. You also acknowledge that + CITRIX is not obligated under this AGREEMENT to make any Updates available to + the public. Any deliveries of Updates shall be electronic. SUPPORT includes + unlimited Incidents, unlimited named contacts and 24x7x365 worldwide coverage. + You may also purchase technical relationship management coverage and consulting + services as part of SUPPORT. The offering you purchase determines your + entitlement. An "Incident" is defined as a single SUPPORT issue and + reasonable effort(s) needed to resolve it. An Incident may require multiple + telephone calls and offline research to achieve final resolution. The Incident + severity will determine the response levels for the SOFTWARE. 
Unused + entitlements expire at the end of each annual term. SUPPORT may be purchased + for the SOFTWARE until it is no longer offered in accordance with the CITRIX + PRODUCT Support Lifecycle Policy posted at www.citrix.com. + SUPPORT will be provided remotely from CITRIX to your locations. Where + on-site visits are mutually agreed, you will be billed for reasonable travel + and living expenses in accordance with your travel policy. CITRIX’ performance + is predicated upon the following responsibilities being fulfilled by you: (i) + you will designate a Customer Support Manager ("CSM") who will be the + primary administrative contact; (ii) you agree to perform reasonable problem + determination activities and to perform reasonable problem resolution + activities as suggested by CITRIX; (iii) you are responsible for implementing + procedures necessary to safeguard the integrity and security of SOFTWARE and + data from unauthorized access and for reconstructing any lost or altered files + resulting from catastrophic failures; (iv) you are responsible for procuring, + installing, and maintaining all equipment, telephone lines, communications + interfaces, and other hardware at your site and providing CITRIX with access to + your facilities as required to operate the SOFTWARE and permitting CITRIX to + perform the service called for by this AGREEMENT; and (v) you are required to + implement all currently available and applicable hotfixes, hotfix rollup packs, + and service packs or their equivalent to the SOFTWARE in a timely manner. 
+ CITRIX is not required to provide any SUPPORT relating to problems arising out + of: (i) your or any third party’s alterations or additions to the SOFTWARE, + operating system or environment that adversely affects the SOFTWARE (ii) Citrix + provided alterations or additions to the SOFTWARE that do not address Errors or + Defects; (ii) any functionality not defined in the PRODUCT documentation + published by CITRIX and included with the PRODUCT; (iii) use of the SOFTWARE on + a processor and peripherals other than the processor and peripherals defined in + the documentation; (iv) SOFTWARE that has reached End-of-Life; and (v) any + consulting deliverables from any party. An "Error" is defined as a + failure in the SOFTWARE to materially conform to the functionality defined in + the documentation. A "Defect" is defined as a failure in the SOFTWARE + to conform to the specifications in the documentation. In situations where + CITRIX cannot provide a satisfactory resolution to your critical problem + through normal SUPPORT methods, CITRIX may engage its product development team + to create a private fix. Private fixes are designed to address your specific + situation and may not be distributed by you outside your organization without + written consent from CITRIX. CITRIX retains all right, title, and interest in + and to all private fixes. Any hotfixes or private fixes are not SOFTWARE under + the terms of this AGREEMENT and they are not covered by the Limited Warranty or + Infringement Indemnification applicable to SOFTWARE, to the extent permitted by + applicable law. 
With respect to consulting services, all intellectual property + rights in all reports, preexisting works and derivative works of such + preexisting works, as well as installation scripts and other deliverables and + developments made, conceived, created, discovered, invented, or reduced to + practice in the performance of the assessment are and shall remain the sole and + absolute property of CITRIX, subject to a worldwide, nonexclusive License to + you for internal use.

+ +

3. DESCRIPTION OF OTHER + RIGHTS, LIMITATIONS, AND OBLIGATIONS. You may not transfer, rent, timeshare, + grant rights in or lease the SOFTWARE except to the extent such foregoing + restriction is expressly prohibited by applicable law. If you purchased + Licenses for the SOFTWARE to replace other CITRIX Licenses for other CITRIX + SOFTWARE and such replacement is a condition of the transaction, you agree to + destroy

+ +

+ +

+ +

those other CITRIX + Licenses and retain no copies after installation of the new Licenses and + SOFTWARE. You shall provide the serial numbers of such replaced Licenses and + corresponding replacement Licenses to the reseller, and upon request, directly + to CITRIX for license tracking purposes. You may not modify, translate, reverse + engineer, decompile, disassemble, create derivative works based on, or copy the + SOFTWARE except as specifically licensed herein or to the extent such foregoing + restriction is expressly prohibited by applicable law. You may not remove any + proprietary notices, labels, or marks on any SOFTWARE. To the extent permitted + by applicable law, you agree to allow CITRIX to audit your compliance with the + terms of this AGREEMENT upon prior written notice during normal business hours. + Notwithstanding the foregoing, this AGREEMENT shall not prevent or restrict you + from exercising additional or different rights to any free, open source code, + documentation and materials contained in or provided with the SOFTWARE in + accordance with the applicable free or open source license for such code, + documentation, and materials.

+ +

ALL RIGHTS IN THE + SOFTWARE NOT EXPRESSLY GRANTED ARE RESERVED BY CITRIX OR ITS SUPPLIERS.

+ +

You hereby agree, that + to the extent that any applicable mandatory laws (such as, for example, + national laws implementing EC Directive 91/250 on the Legal Protection of + Computer Programs) give you the right to perform any of the aforementioned + activities without the consent of CITRIX to gain certain information about the + SOFTWARE, before you exercise any such rights, you shall first request such + information from CITRIX in writing detailing the purpose for which you need the + information. Only if and after CITRIX, at its sole discretion, partly or + completely denies your request, shall you exercise your statutory rights.

+ +

4. + INFRINGEMENT + INDEMNIFICATION. CITRIX shall indemnify and defend, or at its option, settle + any claim, suit, or proceeding brought against you based on an allegation that + the SOFTWARE (excluding Open Source Software) infringes upon any patent or + copyright of any third party ("Infringement Claim"), provided you + promptly notify CITRIX in writing of your notification or discovery of an + Infringement Claim such that CITRIX is not prejudiced by any delay in such + notification. For purposes of this Section 4, "Open Source Software" + means software distributed by Citrix under an open source licensing model + (e.g., the GNU General Public License, BSD or a license similar to those + approved by the Open Source Initiative). CITRIX will have sole control over the + defense or settlement of any Infringement Claim and you will provide reasonable + assistance in the defense of the same. Following notice of an Infringement + Claim, or if CITRIX believes such a claim is likely, CITRIX may at its sole + expense and option: (i) procure for you the right to continue to use the + alleged infringing SOFTWARE; (ii) replace or modify the SOFTWARE to make it + non-infringing; or (iii) accept return of the SOFTWARE and provide you with a + refund as appropriate. CITRIX assumes no liability for any Infringement Claims + or allegations of infringement based on: (i) your use of any SOFTWARE after notice + that you should cease use of such SOFTWARE due to an Infringement Claim; (ii) + any modification of the SOFTWARE by you or at your direction; or (iii) your + combination of SOFTWARE with non-CITRIX programs, data, hardware, or other + materials, if such Infringement Claim would have been avoided by the use of the + SOFTWARE alone. THE FOREGOING STATES YOUR EXCLUSIVE REMEDY WITH RESPECT TO ANY + INFRINGEMENT CLAIM.

+ +

5. + LIMITED + WARRANTY AND DISCLAIMER. CITRIX warrants that for a period of ninety (90) days + from the date of delivery of the SOFTWARE to you, the SOFTWARE will perform + substantially in accordance with the PRODUCT documentation published by CITRIX + and included with the PRODUCT. CITRIX and its suppliers’ entire liability and + your exclusive remedy under this warranty (which is subject to you returning + the SOFTWARE to CITRIX or an authorized reseller) will be, at the sole option + of CITRIX and subject to applicable law, to replace the media and/or SOFTWARE + or to refund the purchase price and terminate this AGREEMENT. This limited + warranty does not cover any modification of the SOFTWARE by you or related + issues. CITRIX will provide the SUPPORT requested by you in a professional and + workmanlike manner, but CITRIX cannot guarantee that every question or problem + raised by you will be resolved or resolved in a certain amount of time. With + respect to SUPPORT, CITRIX and its suppliers’ entire liability and your + exclusive remedy under this warranty is re-performance of the services. 
TO + THE EXTENT PERMITTED BY APPLICABLE LAW AND EXCEPT FOR THE ABOVE LIMITED + WARRANTY FOR SOFTWARE, CITRIX AND ITS SUPPLIERS MAKE AND YOU RECEIVE NO + WARRANTIES OR CONDITIONS, EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE; AND CITRIX + AND ITS SUPPLIERS SPECIFICALLY DISCLAIM WITH RESPECT TO SOFTWARE, UPDATES,

+ +

SUBSCRIPTION ADVANTAGE, + AND SUPPORT ANY CONDITIONS OF QUALITY, AVAILABILITY, RELIABILITY, SECURITY, + LACK OF VIRUSES, BUGS, OR ERRORS, AND ANY IMPLIED WARRANTIES, INCLUDING, + WITHOUT LIMITATION, ANY WARRANTY OF TITLE, QUIET ENJOYMENT, QUIET POSSESSION, + MERCHANTABILITY, NONINFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. THE + SOFTWARE IS NOT DESIGNED, MANUFACTURED, OR INTENDED FOR USE OR DISTRIBUTION + WITH ANY EQUIPMENT THE FAILURE OF WHICH COULD LEAD DIRECTLY TO DEATH, PERSONAL + INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE. YOU ASSUME THE + RESPONSIBILITY FOR THE SELECTION OF THE SOFTWARE AND HARDWARE TO ACHIEVE YOUR + INTENDED RESULTS, AND FOR THE INSTALLATION OF, USE OF, AND RESULTS OBTAINED + FROM THE SOFTWARE AND HARDWARE.

+ +

6. + PROPRIETARY + RIGHTS. No title to or ownership of the SOFTWARE is transferred to you. CITRIX + and/or its licensors own and retain all title and ownership of all intellectual + property rights in and to the SOFTWARE, including any adaptations, + modifications, translations, derivative works or copies. You acquire only a + limited License to use the SOFTWARE.

+ +

7. + EXPORT + RESTRICTION. You agree that you will not export, re-export, or import the + SOFTWARE in any form without the appropriate government licenses. You + understand that under no circumstances may the SOFTWARE be exported to any + country subject to U.S. embargo or to U.S.-designated denied persons or + prohibited entities or U.S. specially designated nationals.

+ +

8. + LIMITATION + OF LIABILITY. TO THE EXTENT PERMITTED BY APPLICABLE LAW, YOU AGREE THAT NEITHER + CITRIX NOR ITS AFFILIATES, SUPPLIERS, OR AUTHORIZED DISTRIBUTORS SHALL BE + LIABLE FOR ANY LOSS OF DATA OR PRIVACY, LOSS OF INCOME, LOSS OF OPPORTUNITY OR + PROFITS, COST OF RECOVERY, LOSS ARISING FROM YOUR USE OF THE SOFTWARE OR + SUPPORT, OR DAMAGE ARISING FROM YOUR USE OF THIRD PARTY SOFTWARE OR HARDWARE OR + ANY OTHER SPECIAL, INCIDENTAL, CONSEQUENTIAL, OR INDIRECT DAMAGES ARISING OUT + OF OR IN CONNECTION WITH THIS AGREEMENT; OR THE USE OF THE SOFTWARE OR SUPPORT, + REFERENCE MATERIALS, OR ACCOMPANYING DOCUMENTATION; OR YOUR EXPORTATION, + REEXPORTATION, OR IMPORTATION OF THE SOFTWARE, HOWEVER CAUSED AND ON ANY THEORY + OF LIABILITY. THIS LIMITATION WILL APPLY EVEN IF CITRIX, ITS AFFILIATES, + SUPPLIERS, OR AUTHORIZED DISTRIBUTORS HAVE BEEN ADVISED OF THE POSSIBILITY OF + SUCH DAMAGES. TO THE EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT SHALL THE + LIABILITY OF CITRIX, ITS AFFILIATES, SUPPLIERS, OR AUTHORIZED DISTRIBUTORS + EXCEED THE AMOUNT PAID FOR THE SOFTWARE, SUBSCRIPTION (INCLUDING SUBSCRIPTION + WITH SUPPORT) OR SUPPORT AT ISSUE. YOU ACKNOWLEDGE THAT THE LICENSE OR SUPPORT + FEE REFLECTS THIS ALLOCATION OF RISK. SOME JURISDICTIONS DO NOT ALLOW THE + LIMITATION OR EXCLUSION OF LIABILITY FOR INCIDENTAL OR CONSEQUENTIAL DAMAGES, + SO THE ABOVE LIMITATION OR EXCLUSION MAY NOT APPLY TO YOU. For purposes of this + AGREEMENT, the term "CITRIX AFFILIATE" shall mean any legal entity + fifty percent (50%) or more of the voting interests in which are owned directly + or indirectly by Citrix Systems, Inc. Affiliates, suppliers, and authorized + distributors are intended to be third party beneficiaries of this AGREEMENT.

+ +

9. + TERMINATION. + This AGREEMENT is effective until terminated. You may terminate this AGREEMENT + at any time by removing the SOFTWARE from your computers and destroying all + copies and providing written notice to CITRIX with the serial numbers of the + terminated licenses. CITRIX may terminate this AGREEMENT at any time for your + breach of this AGREEMENT. Unauthorized copying of the SOFTWARE or the + accompanying documentation or otherwise failing to comply with the license + grant of this AGREEMENT will result in automatic termination of this AGREEMENT + and will make available to CITRIX all other legal remedies. You agree and + acknowledge that your material breach of this AGREEMENT shall cause CITRIX + irreparable harm for which monetary damages alone would be inadequate and that, + to the extent permitted by applicable law, CITRIX shall be entitled to + injunctive or equitable relief without the need for posting a bond. Upon + termination of this AGREEMENT, the License granted herein will terminate and + you must immediately destroy the SOFTWARE and accompanying documentation, and + all backup copies thereof.

+ +

10.           + U.S. + GOVERNMENT END-USERS. If you are a U.S. Government agency, in accordance with + Section 12.212

+ +

of the Federal + Acquisition Regulation (48 CFR 12.212 (October 1995)) and Sections 227.7202-1 + and 227.7202-3 of the Defense Federal Acquisition Regulation Supplement (48 CFR + 227.7202-1, 227.7202-3 (June 1995)), you hereby acknowledge that the SOFTWARE + constitutes "Commercial Computer Software" and that the use, + duplication, and disclosure of the SOFTWARE by the U.S. Government or any of + its agencies is governed by, and is subject to, all of the terms, conditions, + restrictions, and limitations set forth in this standard commercial license + AGREEMENT. In the event that, for any reason, Sections 12.212, 227.7202-1 or + 227.7202-3 are deemed not applicable, you hereby acknowledge that the + Government’s right to use, duplicate, or disclose the SOFTWARE are + "Restricted Rights" as defined in 48 CFR Section 52.227-19(c)(1) and + (2) (June 1987), or DFARS 252.227-7014(a)(14) (June 1995), as applicable. + Manufacturer is Citrix Systems, Inc., 851 West Cypress Creek Road, Fort + Lauderdale, Florida, 33309.

+ +

11.           + AUTHORIZED + DISTRIBUTORS AND RESELLERS. CITRIX authorized distributors and resellers do not + have the right to make modifications to this AGREEMENT or to make any + additional representations, commitments, or warranties binding on CITRIX.

+ +

12.           + CHOICE + OF LAW AND VENUE. If provider is Citrix Systems, Inc., this AGREEMENT will be + governed by the laws of the State of Florida without reference to conflict of + laws principles and excluding the United Nations Convention on Contracts for + the International Sale of Goods, and in any dispute arising out of this + AGREEMENT, you consent to the exclusive personal jurisdiction and venue in the + State and Federal courts within Broward County, Florida. If provider is Citrix + Systems International GmbH, this AGREEMENT will be governed by the laws of + Switzerland without reference to the conflict of laws principles, and excluding + the United Nations Convention on Contracts for the International Sale of Goods, + and in any dispute arising out of this AGREEMENT, you consent to the exclusive + personal jurisdiction and venue of the competent courts in the Canton of + Zurich. If provider is Citrix Systems Asia Pacific Pty Ltd, this AGREEMENT will + be governed by the laws of the State of New South Wales, Australia and + excluding the United Nations Convention on Contracts for the International Sale + of Goods, and in any dispute arising out of this AGREEMENT, you consent to the + exclusive personal jurisdiction and venue of the competent courts sitting in + the State of New South Wales. If any provision of this AGREEMENT is invalid or + unenforceable under applicable law, it shall be to that extent deemed omitted + and the remaining provisions will continue in full force and effect. To the + extent a provision is deemed omitted, the parties agree to comply with the + remaining terms of this AGREEMENT in a manner consistent with the original + intent of the AGREEMENT.

+ +

13.           + HOW + TO CONTACT CITRIX. Should you have any questions concerning this AGREEMENT or + want to contact CITRIX for any reason, write to CITRIX at the following + address: Citrix Systems, Inc., Customer Service, 851 West Cypress Creek Road, + Ft. Lauderdale, Florida 33309; Citrix Systems International GmbH, Rheinweg 9, + CH-8200 Schaffhausen, Switzerland; or Citrix Systems Asia Pacific Pty Ltd., + Level 3, 1 Julius Ave., Riverside Corporate Park, North Ryde NSW 2113, Sydney, + Australia.

+ +

14.           + TRADEMARKS. + Citrix is a trademark and/or registered trademark of Citrix Systems, Inc., in + the U.S. and other countries.

+ +

CTX_code: CS_P_A118407

+ +

 

+ diff --git a/build/license/eula.ja.html b/build/license/eula.ja.html index 672a59c0bc1..f2f7093ff37 100755 --- a/build/license/eula.ja.html +++ b/build/license/eula.ja.html @@ -1,4 +1,3 @@ -<<<<<<< HEAD

CITRIX® ライセンス契約書

@@ -127,6 +126,3 @@

 

-======= -

CITRIX® ライセンス契約書

この文書は、ライセンスを許諾されるユーザー (以下「お客様」といいます) と Citrix Systems, Inc、、Citrix Systems International GmbH、または Citrix Systems Asia Pacific Pty Ltd. との間で締結される法的に有効な契約 (以下「本契約」といいます) です。お客様がこの製品または Feature Release (以下、総称して「本製品」といいます) またはテクニカル サポート (以下「サポート」といいます) の引渡しを受ける具体的場所によって、本契約に基づく提供主体が決定されます (該当する法人を、以下「CITRIX」といいます)。アメリカ合衆国デラウェア州法人である Citrix Systems, Inc. は、北米、中米、および南米の各諸国ならびに日本において、本製品のライセンス許諾を提供し、北米、中米、および南米の各諸国において、サポートを提供しています。Citrix Systems, Inc. が全株式を保有するスイス連邦共和国法人である Citrix Systems International GmbH は、ヨーロッパ、中東、およびアフリカにおいて、本製品のライセンス許諾およびサポートを提供し、アジアおよび太平洋沿岸の各諸国 (日本を除く) において、本製品のライセンス許諾を提供しています。Citrix Systems Asia Pacific Pty Ltd. は、アジアおよび太平洋沿岸の各諸国 (日本を除く) において、サポートを提供しています。シトリックス・システムズ・ジャパン株式会社は、日本において、サポートを提供しています。本製品をインストールおよび/または使用することにより、お客様は本契約条項に同意されたものとします。本契約条項に同意いただけない場合、本製品をインストールおよび/または使用しないでください。

1. ライセンスの許諾。本製品は Citrix が所有権を有するソフトウェア プログラムであり、本契約に基づいてオブジェクト コード形式で配布されます。本製品のライセンスは、CPU ソケット モデルに基づいて許諾されます。ライセンス モデルで定義される、ソフトウェアを使用可能にするライセンス (以下「ライセンス」といいます) によって、本製品がアクティブ化されます。

CPU ソケット モデル条件では、「CPU ソケット」は、CPU を搭載するか否かにかかわらず、本製品を実行するサーバー上の個々の CPU ソケットを指します。

ほかの CITRIX 製品または本製品の別のエディションのライセンスを使って、本製品の許可される使用範囲を拡大することはできません。ライセンスは製品のバージョンごとに許諾されます。ライセンスのバージョンは、アクセスされる本製品と同じか新しいものである必要があります。CITRIX は、お客様に対し、本製品および付属文書 (以下、総称して「本ソフトウェア」といいます) について、次のとおり全世界での非独占的な権利を許諾します。

a. ライセンス。お客様は、合計して最大でお客様が購入されたライセンスに対応する数の CPU ソケットを搭載する複数のサーバー (以下「実稼働サーバー」といいます) に、本ソフトウェアをインストールすることができます。さらに、お客様は、実稼働サーバーで実行する本ソフトウェアをサポートするために、必要に応じて管理用サーバーに本ソフトウェアの管理部分をインストールすることができます。お客様は、内部ユーザーまたは第三者にクラウド サービスを提供するために、本ソフトウェアを使用することができます。実稼働環境と障害復旧環境の両方にインストールされている各ライセンスは、常に、いずれか一方の環境でのみ使用することができます。ただし、障害復旧環境の定期的なテストを行う場合に限り、当該ライセンスを両方の環境で同時に使用することができます。お客様は、本ソフトウェアの Web ユーザー インターフェイスのみをカスタマイズする権利を有します。

b. 永続的ライセンス。本ソフトウェアが「永続的ライセンス ソフトウェア」である場合は、本ソフトウェアは永続的に使用許諾され、サブスクリプション (下記第 2 条に定義します) による特典を受け取る権利が含まれるものとします。

c. 年間ライセンス。本ソフトウェアが「年間ライセンス ソフトウェア」である場合は、お客様のライセンスは 1 年間有効で、この期間中アップデートを受け取る権利が含まれます。ただし、サブスクリプション (下記第 2 条に定義します) に基づくものではありません。本契約においてアップデートとは、本ソフトウェア専用に一般に公開されたリリース版を意味するものとします。年間ライセンスの有効期間を延長するには、お客様は、現在のライセンスが失効する前に、新しいライセンスを購入しインストールしていただく必要があります。新しいライセンスを購入しインストールしていただけない場合は、年間ライセンス ソフトウェアを、ライセンスの有効期間失効後は使用いただけません。年間ライセンス ソフトウェアは、ライセンスの有効期間が失効した時点で自動的に無効となります。

d. パートナー デモ用ソフトウェア。本ソフトウェアに「Partner Demo (パートナー デモ用)」と記されている場合は、本契約の規定のいかんにかかわらず、お客様が現在 CITRIX 正規ディストリビューターまたは正規販売代理店である場合に限り、お客様の顧客の支援を目的として、デモンストレーション、テストまたは評価のためにのみ、本ソフトウェアを使用することができます。パートナー デモ用ソフトウェアは、お客様の顧客トレーニングを目的として使用することはできません。パートナー デモ用ソフトウェアは、ソフトウェア パッケージに記された期日に自動的に機能を停止します。

e. 評価用ソフトウェア。本ソフトウェアに「Evaluation (評価用)」と記されている場合は、本契約の規定のいかんにかかわらず、お客様自身の内部デモンストレーション、テストまたは評価のためにのみ、本ソフトウェアを使用することができます。評価用ソフトウェアは、ソフトウェア パッケージに記された期日に自動的に機能を停止します。

f. 保存用の複製。お客様は、バックアップのみを目的として、機械可読形態の本ソフトウェアの複製物を 1 部作成することができます。ただし、所有権に関するすべての表示を当該複製物に複写することを条件とします。

2. サブスクリプション権。永続的ライセンス ソフトウェアの初回サブスクリプション (以下「サブスクリプション」といいます) はサポートを含み、電子メールによるお客様に対するユーザー ライセンスの引渡日より開始します。サブスクリプションは、その後 1 年間存続しますが、毎年更新をご購入いただく必要があります (「サブスクリプション期間」)。最初のまたは更新されたサブスクリプション期間中、CITRIX は随時、アップデートを一般に公開する場合があります。サブスクリプション期間内にアップデートが公開された場合、CITRIX はお客様に対し、対象となるライセンスのアップデートを提供するものとします。お客様に引き渡される一切のアップデートは、本契約条項に基づく本ソフトウェアとみなされます。ただし、適用法令により許容される限度において、本ソフトウェアに適用される限定担保責任条項の対象外とします。本ソフトウェアに対する Subscription Advantage は、www.citrix.com に掲載される Citrix Product Lifecycle Support Policy (Citrix 製品ライフサイクル サポート ポリシー) に従って本ソフトウェアが提供されなくなるまでご購入いただけます。

お客様は、CITRIX が、本ソフトウェアの一部を使用し、本ソフトウェアの機能の一部または全部を実行できる、新規のもしくは異なるコンピューター プログラムまたは本ソフトウェアの別エディションを開発し販売できることを了承するものとします。本契約は、そのような新規もしくは異なるコンピューター プログラムまたはエディションに関する権利をお客様に許諾するものではありません。お客様は、CITRIX は、本契約によってアップデートを公開する義務を負わないことも了承するものとします。アップデートの引渡条件は、CITRIX の工場渡し条件 (Ex Works CITRIX) とします (2000 年インコタームズ準拠)。

3. サポート。サポートは、インシデント、テクニカル サポート コンタクト、対応時間、地理的な対応地域、テクニカル リレーションシップ マネジメント サービス、およびインフラストラクチャ評価オプションからなるさまざまな組み合わせにより販売されます。利用権の内容は、お客様が購入する商品により決定されます。「インシデント」とは、サポートを必要とする単一の問題およびその解決に必要とされる合理的な努力とします。最終的な解決策を見いだすためには、複数回の通話およびオフラインでの調査が必要となる可能性があります。インシデントの重大度によって、本ソフトウェアに対する対応レベルが決定されます。未使用のインシデントおよびそのほかの利用権は、各年間サポート期間の終了日に失効します。本ソフトウェアに対するサポートは、www.citrix.com に掲載される Citrix Product Lifecycle Support Policy (Citrix 製品ライフサイクル サポート ポリシー) に従って本ソフトウェアが提供されなくなるまでご購入いただけます。サポートは、CITRIX からお客様の所在地へ、リモートで提供されます。お客様の所在地への訪問が相互に合意された場合は、CITRIX は、お客様の出張規定に従って、妥当な旅費および滞在費をお客様に請求するものとします。CITRIX によるサポートの実施は、お客様に次の義務を履行いただくことを前提とします。(i) 主な管理担当者としてカスタマー サポート マネージャー (Customer Support Manager: 以下「CSM」といいます) を 1 名指名いただきます。(ii) ネームド コンタクト (CSM を含む) として、できれば CITRIX 認定資格を有する方を複数名指名いただきます。各ネームド コンタクト (CSM を除く) には、サポート担当者に連絡するための個別のサービス ID が提供されます。(iii) CITRIX の指示に従い、問題の特定および解決のための合理的な作業を行うことに同意いただきます。お客様には、このような要求に協力することに同意いただきます。(iv) ソフトウェアおよびデータの保全性および安全性を不正アクセスから保護するために必要な手続きを導入すること、および致命的なエラーにより消失または変更されたあらゆるファイルを再構築することに責任を負っていただきます。(v) お客様の所在地においてすべての機器、電話回線、通信インターフェイスおよびそのほかのハードウェアを調達、設置および保守すること、本ソフトウェアを操作するために、お客様の設備へのアクセスを CITRIX に必要に応じて提供すること、ならびに本契約により要求されるサービスを CITRIX が実施することを許可することに責任を負っていただきます。(vi) 現在入手可能および適用可能なすべての Hotfix、Hotfix ロールアップ パック、サービス パック等を、本ソフトウェアに時宜に即して適用いただきます。CITRIX は、次を原因とする問題に関連して、一切のサポート提供を要求されないものとします。(i) 本ソフトウェアに悪影響を与える、本ソフトウェア、オペレーティング システム、または環境に対するお客様または第三者による改変または追加。(ii) 異常または欠陥に対処しない、本ソフトウェアに対する CITRIX による改変または追加。(ii) CITRIX によって発行され、本製品に同封されている製品付属文書に定義されていない機能。(iii) 文書に定義されているプロセッサおよび周辺機器以外のプロセッサおよび周辺機器での本ソフトウェアの使用。(iv) 製造終了した本ソフトウェア。(v) 当事者によるコンサルティング成果物。「異常」とは、文書に定義されている機能に実質的に適合しない、本ソフトウェアにおける不具合のこととします。「欠陥」とは、文書に記載されている仕様に適合しない、本ソフトウェアにおける不具合のこととします。お客様の重大な問題に対して CITRIX が通常のサポート方法では十分な解決策を提供できない場合は、CITRIX はその製品開発チームに Private fix を作成させる場合があります。Private fix はお客様固有の状況に対処するために設計されるもので、CITRIX の書面による同意なしに、お客様の組織外で配布することはできません。CITRIX は、すべての Private fix に対するすべての権利、権原および利益を留保します。一切の Hotfix または Private fix 
は、本契約条項に基づく本ソフトウェアではなく、適用法令により許容される限度において、本ソフトウェアに適用される限定担保責任条項または知的財産権侵害補償条項の対象外とします。インフラストラクチャ評価またはそのほかのコンサルティング サービスについて、すべてのレポート、既存の著作物およびその二次的著作物、ならびに評価の実施にあたり作成、着想、創作、発見、発明または実用化される、インストール スクリプトその他成果物および開発成果にかかるすべての知的財産権は、CITRIX が単独かつ完全に有し続けるものとします。なお、お客様には、お客様の内部使用を目的に、この知的財産権に対する全世界での、非独占的な権利が許諾されます。

4. その他の権利、制限および義務。下記制限特約をすることが適用法令によって明示的に禁止されている限度を除き、お客様は、本ソフトウェアを譲渡、レンタル、タイムシェアリング、権利を許諾、およびリースすることはできません。お客様が本ソフトウェア以外の CITRIX ソフトウェアの CITRIX ライセンスと置き換えるために本ソフトウェアのライセンスを購入された場合で、この置換えが当該購入についての取引条件であるときは、お客様は、新しいライセンスおよび本ソフトウェアをインストールした後は、置き換えられた以前の CITRIX ライセンスを破棄し、いかなる複製物も保有しないことに合意するものとします。お客様は、ライセンスの追跡調査を目的として、当該置換対象ライセンスおよび置き換え後の対応ライセンスのシリアル番号を販売代理店または、請求ある場合、CITRIX に直接提供するものとします。本契約において明示的に許可される場合または下記制限特約をすることが適用法令によって明示的に禁止されている限度を除き、お客様は、本ソフトウェアを改変、翻訳、リバース エンジニア、逆コンパイル、逆アセンブル、本ソフトウェアに基づいた二次的著作物の作成、または複製物の作成をすることはできません。お客様は、本ソフトウェアから所有権に関する通知、ラベル、表示の一切を除去することはできません。お客様は、適用法令により許容される限度において、CITRIX が、事前通知をもって、お客様の通常営業時間内に本契約の遵守状況を調査することに同意いただくものとします。上記にかかわらず、本契約は、本ソフトウェアに含まれるかまたは共に提供される無償のオープン ソース コード、その関連文書および資料に対して、当該の無償のオープン ソース コード、その関連文書および資料を対象に適用される無償のオープン ソース ライセンスに基づいて、追加のまたは異なる権利をお客様が行使することを阻止または制限するものではありません。

明示的に許諾されていない本ソフトウェアのすべての権利は、CITRIX あるいはその製品供給者が留保します。

適用される強行法規 (たとえば、コンピューター プログラムの法的保護に関する EC 指令 91/250 に準拠した内国制定法) により、お客様が、CITRIX の同意を要せず、本ソフトウェアに係るある一定の情報を取得する権利を有する場合、お客様は、制定法上の当該権利を行使される前に先ず、CITRIX に対し書面でもって利用目的を明確にして当該情報を請求いただくものとします。CITRIX が、その単独の判断で当該情報の一部または全部の提供を拒否した場合に限り、お客様は、制定法上の当該権利を行使するものとします。

5. 侵害行為に対する補償。本ソフトウェア (オープン ソース ソフトウェアを除く) が第三者の特許または著作権を侵害するという申し立て (以下「侵害申立」といいます) に基づいて、お客様に対し提起される賠償請求、訴訟、または訴訟手続について、お客様が侵害申立についての通知を受けまたは発見したことを、CITRIX が当該の通知の遅れにより損害を受けないように速やかに CITRIX に書面で通知することを条件に、CITRIX は、当該の賠償請求、訴訟、または訴訟手続について、お客様の損失を補償し、また、CITRIX の選択により、訴訟追行または私的解決をするものとします。本第 5 条において「オープン ソース ソフトウェア」とは、オープン ソース ライセンス モデル (GNU General Public License、BSD、または Open Source Initiative により承認されるライセンスと同様のもの) に基づいて CITRIX により頒布されるソフトウェアを指します。CITRIX は、一切の侵害申立の訴訟追行または私的解決について単独の追行権限を有し、お客様は、侵害申立に対する攻撃・防禦について合理的な支援を提供するものとします。侵害申立の通知を受けて、または CITRIX が当該侵害申立の妥当性が高いと信じる場合、CITRIX は、その単独の費用と選択において、次の行為を行うことができます。(i) 侵害申立を受けた本ソフトウェアの使用を続行する権利をお客様のために獲得する、(ii) 本ソフトウェアを侵害しないものへ交換または変更する、または、(iii) 本ソフトウェアの返品を承認し、必要に応じて、お客様に返金する。以下の場合には、CITRIX は、あらゆる侵害申立または権利侵害の主張に対して、一切の責任を負いません。(i) 侵害申立により、本ソフトウェアの使用を停止すべきであると認識した後も、お客様が本ソフトウェアを使用した場合、(ii) お客様により、またはお客様の指示により本ソフトウェアが変更された場合、または (iii) 侵害申立が、本ソフトウェア単独の使用により避けられた場合に、CITRIX 製品でないプログラム、データ、ハードウェア、またはその他のものと、本ソフトウェアを組み合わせて使用した場合。上記内容は、あらゆる権利侵害主張に対するお客様の唯一の救済方法とします。

6. 限定担保責任および免責。CITRIX は、本ソフトウェアの引渡日から 90 日間、CITRIX によって発行され、本製品に同封されている製品付属文書どおりに本ソフトウェアが実質的に動作することを保証します。本限定担保責任条項に基づく CITRIX およびその製品供給者の全責任ならびにお客様の唯一の救済方法は、CITRIX 単独の選択および適用法令に従って、媒体または本ソフトウェアの交換、または購入金額の払戻と本契約の解約に限られます。ただし、お客様が本ソフトウェアを CITRIX またはその正規代理店に返品することが条件となります。本限定担保責任は、お客様による本ソフトウェアの一切の変更または関連問題についてはその対象といたしません。CITRIX は、専門職業的手法をもってお客様により要求されるサポートを提供しますが、お客様により提起されるすべての質問または問題が、一定の時間内に解決されることを保証するものではありません。コンサルティング サービスについて、本限定担保責任条項に基づく CITRIX およびその製品供給者の全責任ならびにお客様の唯一の救済方法は、サービスの再実施に限られます。

適用法令により許容される限度において、および本ソフトウェアに対する上記限定担保責任を除き、CITRIX およびその製品供給者は、明示、黙示、法令上その他いかなる担保責任または契約上の保証もお客様に付与するものではありません。また、CITRIX およびその製品供給者は、本ソフトウェア、アップデート、Subscription Advantage、およびサポートについて、品質、可用性、信頼性、安全性、またはコンピューター ウィルス、不具合、もしくは異常のないことに関わる一切の契約上の保証責任および、権原、平穏享有、平穏占有、商品性、第三者の権利を侵害しないこと、または特定目的への適合性を含む、一切の黙示の担保責任を否認します。本ソフトウェアは、その故障を直接の原因として人の生命、身体、財産または環境に係る深刻な被害をもたらすような装置との組合せによる使用または販売を目的として、設計、製造、または意図されたものではありません。お客様は、意図する結果を得るための本ソフトウェアおよびハードウェアの選択、ならびに、本ソフトウェアおよびハードウェアのインストール、使用、およびそれにより得られた結果について一切の責任を負うものとします。

7. 所有権。本ソフトウェアに係る権原および所有権は、お客様に譲渡されるものではありません。その改作物、変更物、翻訳、二次的著作物、および複製物を含め、本ソフトウェアに係るすべての知的財産権に係る一切の権原および所有権は、CITRIX および/またはそのライセンサーが保有し、留保します。お客様は、本ソフトウェアを使用するための限定的なライセンスを取得するものとします。

8. 輸出規制。お客様は、適切な政府の許可を得ずに、いかなる形態でも本ソフトウェアを輸出、再輸出または輸入しないことに合意します。お客様は、いかなる場合でも、アメリカ合衆国の禁輸対象国、アメリカ合衆国により取引の禁止を指定されている個人もしくは団体、またはアメリカ合衆国により特別に指定された同国人に対し、本ソフトウェアを輸出してはならないことを了解します。

9. 責任の限定。適用法令により許容される限度において、CITRIX もしくは、その関連会社、製品供給者、または正規ディストリビューターが、データまたは個人情報の喪失、収益の減損、機会または利益の逸失、回復費用、本ソフトウェアまたはサポートの使用により生じた損失、サードパーティ製ソフトウェアもしくはハードウェアの使用によって生じた損害、または、本契約に関連する、本ソフトウェアまたはサポート、関連資料、もしくは付属文書の使用によって生じた、またはお客様による本ソフトウェアの輸出、再輸出、もしくは輸入によって生じた、その他の特別、付随的、二次的、もしくは間接的な損害に対し、損害発生の態様またはその請求原因のいかんにかかわらず、一切責任を負わないものとします。この責任の限定は、CITRIX、その関連会社、製品供給者、または正規ディストリビューターが、当該損害の発生可能性を事前に通知されていた場合といえども、なお適用されるものとします。適用法令により許容される限度において、いかなる場合においても、CITRIX、その関連会社、製品供給者、または正規ディストリビューターの責任額は、問題となっている特定のソフトウェア、サブスクリプション (サポート付きのサブスクリプションを含む)、またはサポートを購入するためにお客様にお支払いいただいた金額を超えないものとします。お客様は、本ソフトウェアのライセンス料またはサポート料が、上記の危険負担を反映したものであることを了承するものとします。法域によっては、付随的または二次的損害に対する責任の限定または排除が認められていないことがあり、上記の責任の限定または排除がお客様に適用されない場合があります。本契約において「CITRIX の関連会社」とは、その 50% 以上の議決権を Citrix Systems, Inc. が直接または間接的に所有する法人をいいます。関連会社、製品供給者、および正規代理店は、本契約による利益を享受する第三受益者となります。

10. 解約。本契約は、本契約に基づき解約されない限り有効に存続するものとします。お客様は、本ソフトウェアをコンピューターから削除し、本ソフトウェアのすべての複製物を破棄し、解約するライセンスのシリアル番号を書面で CITRIX に通知することにより、いつでも本契約を解約することができます。CITRIX は、お客様の本契約に対する違反により、いつでも本契約を解約することができます。本ソフトウェアまたは付属文書の無許諾複製その他本契約に基づくライセンスの許諾条件に違反した場合、本契約は自動的に終了し、CITRIX はその他の一切の法的救済手段を講じることができるものとします。お客様は、本契約に対する重大な違反は、金銭損害賠償のみでは不十分な、回復不能な損害を CITRIX にもたらすこと、および、適用法令により許容される限度において、CITRIX が、保証金の支払いを要せず、差止命令による救済もしくは衡平法上の救済を受ける権利を有することに合意し、了承します。本契約の終了と同時に、本契約に基づいて許諾されたライセンスは終了するものとし、お客様は、直ちに本ソフトウェア、付属文書、およびすべてのバックアップ コピーを破棄しなければなりません。

11. お客様がアメリカ合衆国政府機関である場合。お客様がアメリカ合衆国政府機関である場合は、連邦調達規則第 12.212 条 (48 CFR 12.212 (1995 年 10 月)) ならびに国防連邦調達規則補遺第 227.7202-1 条および第 227.7202-3 条 (48 CFR 227.7202-1、227.7202-3 (1995 年 6 月)) に基づき、お客様は、本ソフトウェアが「商用コンピューター ソフトウェア」であり続けること、および、本契約により、本ソフトウェアのアメリカ合衆国政府またはその機関による使用、複製、および開示は、この標準的な商用ライセンス契約に定めるすべての条項、条件、制約、および制限に従うものとすることを了承するものとします。理由の如何を問わず、第 12.212 条、第 227.7202-1 条、または第 227.7202-3 条が適用法令とみなされない場合は、お客様は、アメリカ合衆国政府機関による本ソフトウェアの使用、複製、および開示の権利は、規定どおりに、48 CFR 第 52.227-19 条 (c) (1) および (2) 項 (1987 年 6 月)、または DFARS 第 252.227-7014 条 (a) (14) 項 (1995 年 6 月) に定義される「制限付き権利」に限定されることを了承するものとします。製造者は、Citrix Systems, Inc., 851 West Cypress Creek Road, Fort Lauderdale, Florida, 33309, United States of America です。

12. 正規ディストリビューターおよび正規販売代理店。CITRIX 正規ディストリビューターおよび販売代理店には、本契約を変更する権限、および CITRIX を拘束する一切の表明、約束、保証を行う権限はありません。

13. 準拠法および裁判籍の選択。提供者が Citrix Systems, Inc. の場合、本契約は、国際物品売買契約に関する国連条約の適用を排除し、かつ、抵触法の原則に関わりなく、アメリカ合衆国フロリダ州法に準拠するものとします。また、本契約に起因するいかなる紛争についても、お客様は、フロリダ州ブラウワード郡内の州裁判所および連邦裁判所が専属的に人的裁判管轄権ならびに裁判籍を有することに同意するものとします。提供者が Citrix Systems International GmbH の場合、本契約は、国際物品売買契約に関する国連条約の適用を排除し、かつ、抵触法の原則に関わりなく、スイス連邦共和国法に準拠するものとします。また、本契約に起因するいかなる紛争についても、お客様は、チューリッヒ州内の所管裁判所が専属的に人的裁判管轄権ならびに裁判籍を有することに同意するものとします。提供者が Citrix Systems Asia Pacific Pty Ltd の場合、本契約は、国際物品売買契約に関する国連条約の適用を排除し、オーストラリア連邦ニュー サウス ウェールズ州法に準拠するものとします。また、本契約に起因するいかなる紛争についても、お客様は、ニュー サウス ウェールズ州内の所管裁判所が専属的に人的裁判管轄権ならびに裁判籍を有することに同意するものとします。本契約の規定のいずれかが適用法令によって無効または強制不能であるとされた場合、その限度において、当該規定は削除、抹消されているものとみなし、本契約のその他すべての規定はなお有効に存続するものとします。当該規定が削除、抹消されているとみなす限りにおいて、両当事者は、本契約の当初の目的と整合性を有する方法で、本契約の残りの規定を遵守することに合意します。

14. CITRIX への連絡。本契約に関する質問その他 CITRIX へのお問い合わせは、CITRIX まで次の住所宛に書面でお送りください。Citrix Systems, Inc., Customer Service, 851 West Cypress Creek Road, Ft. Lauderdale, Florida 33309、Citrix Systems International GmbH, Rheinweg 9, CH-8200 Schaffhausen, Switzerland、または Citrix Systems Asia Pacific Pty Ltd., Level 3, 1 Julius Ave., Riverside Corporate Park, North Ryde NSW 2113, Sydney, Australia。

15. 商標。Citrix は、Citrix Systems, Inc. の米国およびそのほかの国における商標および/または登録商標です。

->>>>>>> 8bdf7ae... move all license html to build/license diff --git a/build/package.xml b/build/package.xml index 0fce961a485..523bb7b62a2 100755 --- a/build/package.xml +++ b/build/package.xml @@ -139,7 +139,7 @@
- + diff --git a/console-proxy/.classpath b/console-proxy/.classpath index dead7c796d1..c87c7cd3995 100644 --- a/console-proxy/.classpath +++ b/console-proxy/.classpath @@ -3,9 +3,5 @@ -<<<<<<< HEAD -======= - ->>>>>>> 52ebf15... Console proxy refactoring incremental check-in - new VNC protocol implementation diff --git a/console-proxy/src/com/cloud/consoleproxy/util/ITileScanListener.java b/console-proxy/src/com/cloud/consoleproxy/util/ITileScanListener.java index 167938091dc..b9364979887 100644 --- a/console-proxy/src/com/cloud/consoleproxy/util/ITileScanListener.java +++ b/console-proxy/src/com/cloud/consoleproxy/util/ITileScanListener.java @@ -10,7 +10,6 @@ // limitations under the License. // // Automatically generated by addcopyright.py at 04/03/2012 - package com.cloud.consoleproxy.util; import java.awt.Rectangle; diff --git a/console-proxy/src/com/cloud/consoleproxy/vnc/BufferedImageCanvas.java b/console-proxy/src/com/cloud/consoleproxy/vnc/BufferedImageCanvas.java index 9dc523d4f3d..e89d632dd0c 100644 --- a/console-proxy/src/com/cloud/consoleproxy/vnc/BufferedImageCanvas.java +++ b/console-proxy/src/com/cloud/consoleproxy/vnc/BufferedImageCanvas.java @@ -1,4 +1,3 @@ -<<<<<<< HEAD // Copyright 2012 Citrix Systems, Inc. Licensed under the // Apache License, Version 2.0 (the "License"); you may not use this // file except in compliance with the License. Citrix Systems, Inc. @@ -11,15 +10,12 @@ // limitations under the License. // // Automatically generated by addcopyright.py at 04/03/2012 -======= ->>>>>>> 52ebf15... 
Console proxy refactoring incremental check-in - new VNC protocol implementation package com.cloud.consoleproxy.vnc; import java.awt.Canvas; import java.awt.Color; import java.awt.Graphics; import java.awt.Graphics2D; -<<<<<<< HEAD import java.awt.Image; import java.awt.Rectangle; import java.awt.image.BufferedImage; @@ -28,19 +24,12 @@ import java.util.List; import com.cloud.consoleproxy.util.ImageHelper; import com.cloud.consoleproxy.util.TileInfo; -======= -import java.awt.image.BufferedImage; ->>>>>>> 52ebf15... Console proxy refactoring incremental check-in - new VNC protocol implementation /** * A BuffereImageCanvas component represents frame buffer image on the * screen. It also notifies its subscribers when screen is repainted. */ -<<<<<<< HEAD public class BufferedImageCanvas extends Canvas implements FrameBufferCanvas { -======= -public class BufferedImageCanvas extends Canvas { ->>>>>>> 52ebf15... Console proxy refactoring incremental check-in - new VNC protocol implementation private static final long serialVersionUID = 1L; // Offline screen buffer @@ -82,14 +71,9 @@ public class BufferedImageCanvas extends Canvas { public void paint(Graphics g) { // Only part of image, requested with repaint(Rectangle), will be // painted on screen. -<<<<<<< HEAD synchronized(offlineImage) { g.drawImage(offlineImage, 0, 0, this); } -======= - g.drawImage(offlineImage, 0, 0, this); - ->>>>>>> 52ebf15... Console proxy refactoring incremental check-in - new VNC protocol implementation // Notify server that update is painted on screen listener.imagePaintedOnScreen(); } @@ -101,7 +85,6 @@ public class BufferedImageCanvas extends Canvas { public Graphics2D getOfflineGraphics() { return graphics; } -<<<<<<< HEAD public void copyTile(Graphics2D g, int x, int y, Rectangle rc) { synchronized(offlineImage) { @@ -164,7 +147,4 @@ public class BufferedImageCanvas extends Canvas { } return imgBits; } -======= - ->>>>>>> 52ebf15... 
Console proxy refactoring incremental check-in - new VNC protocol implementation } \ No newline at end of file diff --git a/console-proxy/src/com/cloud/consoleproxy/vnc/FrameBufferUpdateListener.java b/console-proxy/src/com/cloud/consoleproxy/vnc/FrameBufferUpdateListener.java index 78ec97857c2..7f6b122c46e 100644 --- a/console-proxy/src/com/cloud/consoleproxy/vnc/FrameBufferUpdateListener.java +++ b/console-proxy/src/com/cloud/consoleproxy/vnc/FrameBufferUpdateListener.java @@ -1,4 +1,3 @@ -<<<<<<< HEAD // Copyright 2012 Citrix Systems, Inc. Licensed under the // Apache License, Version 2.0 (the "License"); you may not use this // file except in compliance with the License. Citrix Systems, Inc. @@ -11,8 +10,6 @@ // limitations under the License. // // Automatically generated by addcopyright.py at 04/03/2012 -======= ->>>>>>> 52ebf15... Console proxy refactoring incremental check-in - new VNC protocol implementation package com.cloud.consoleproxy.vnc; public interface FrameBufferUpdateListener { diff --git a/console-proxy/src/com/cloud/consoleproxy/vnc/PaintNotificationListener.java b/console-proxy/src/com/cloud/consoleproxy/vnc/PaintNotificationListener.java index 00d685247e7..cf74e6789b6 100644 --- a/console-proxy/src/com/cloud/consoleproxy/vnc/PaintNotificationListener.java +++ b/console-proxy/src/com/cloud/consoleproxy/vnc/PaintNotificationListener.java @@ -1,4 +1,3 @@ -<<<<<<< HEAD // Copyright 2012 Citrix Systems, Inc. Licensed under the // Apache License, Version 2.0 (the "License"); you may not use this // file except in compliance with the License. Citrix Systems, Inc. @@ -11,8 +10,6 @@ // limitations under the License. // // Automatically generated by addcopyright.py at 04/03/2012 -======= ->>>>>>> 52ebf15... 
Console proxy refactoring incremental check-in - new VNC protocol implementation package com.cloud.consoleproxy.vnc; public interface PaintNotificationListener { diff --git a/console-proxy/src/com/cloud/consoleproxy/vnc/RfbConstants.java b/console-proxy/src/com/cloud/consoleproxy/vnc/RfbConstants.java index 12af1579323..13dcf46abf2 100644 --- a/console-proxy/src/com/cloud/consoleproxy/vnc/RfbConstants.java +++ b/console-proxy/src/com/cloud/consoleproxy/vnc/RfbConstants.java @@ -1,4 +1,3 @@ -<<<<<<< HEAD // Copyright 2012 Citrix Systems, Inc. Licensed under the // Apache License, Version 2.0 (the "License"); you may not use this // file except in compliance with the License. Citrix Systems, Inc. @@ -11,8 +10,6 @@ // limitations under the License. // // Automatically generated by addcopyright.py at 04/03/2012 -======= ->>>>>>> 52ebf15... Console proxy refactoring incremental check-in - new VNC protocol implementation package com.cloud.consoleproxy.vnc; import java.nio.charset.Charset; diff --git a/console-proxy/src/com/cloud/consoleproxy/vnc/SimpleLogger.java b/console-proxy/src/com/cloud/consoleproxy/vnc/SimpleLogger.java deleted file mode 100644 index 2a6ae4d351e..00000000000 --- a/console-proxy/src/com/cloud/consoleproxy/vnc/SimpleLogger.java +++ /dev/null @@ -1,40 +0,0 @@ -package com.cloud.consoleproxy.vnc; - -public class SimpleLogger { - - public static void log(String message) { - System.out.println(getPrefix(1) + " LOG: " + message); - } - - public static void log(int skipFrames, String message) { - System.out.println(getPrefix(1+skipFrames) + " LOG: " + message); - } - - public static void debug(String message) { - System.out.println(getPrefix(1) + " DEBUG: " + message); - } - - public static void info(String message) { - System.out.println(getPrefix(1) + " INFO: " + message); - } - - public static void warn(String message) { - System.err.println(getPrefix(1) + " WARN: " + message); - } - - public static void error(String message) { - 
System.err.println(getPrefix(1) + " ERROR: " + message); - } - - private static String getPrefix(int skipFrames) { - StackTraceElement frame; - try { - throw new RuntimeException(); - } catch (Exception e) { - frame = e.getStackTrace()[1+skipFrames]; - } - - return "(" + frame.getFileName() + ":" + frame.getLineNumber() + ") " + frame.getMethodName() + "()"; - } - -} diff --git a/console-proxy/src/com/cloud/consoleproxy/vnc/VncClient.java b/console-proxy/src/com/cloud/consoleproxy/vnc/VncClient.java index 79e51888dfd..b7ed80c008c 100644 --- a/console-proxy/src/com/cloud/consoleproxy/vnc/VncClient.java +++ b/console-proxy/src/com/cloud/consoleproxy/vnc/VncClient.java @@ -1,4 +1,3 @@ -<<<<<<< HEAD // Copyright 2012 Citrix Systems, Inc. Licensed under the // Apache License, Version 2.0 (the "License"); you may not use this // file except in compliance with the License. Citrix Systems, Inc. @@ -11,8 +10,6 @@ // limitations under the License. // // Automatically generated by addcopyright.py at 04/03/2012 -======= ->>>>>>> 52ebf15... Console proxy refactoring incremental check-in - new VNC protocol implementation package com.cloud.consoleproxy.vnc; import java.awt.Frame; @@ -31,7 +28,6 @@ import javax.crypto.SecretKey; import javax.crypto.SecretKeyFactory; import javax.crypto.spec.DESKeySpec; -<<<<<<< HEAD import com.cloud.consoleproxy.ConsoleProxyClientListener; import com.cloud.consoleproxy.util.Logger; import com.cloud.consoleproxy.util.RawHTTP; @@ -40,9 +36,6 @@ import com.cloud.consoleproxy.vnc.packet.client.MouseEventPacket; public class VncClient { private static final Logger s_logger = Logger.getLogger(VncClient.class); -======= -public class VncClient { ->>>>>>> 52ebf15... 
Console proxy refactoring incremental check-in - new VNC protocol implementation private Socket socket; private DataInputStream is; @@ -52,12 +45,9 @@ public class VncClient { private VncClientPacketSender sender; private VncServerPacketReceiver receiver; -<<<<<<< HEAD private boolean noUI = false; private ConsoleProxyClientListener clientListener = null; -======= ->>>>>>> 52ebf15... Console proxy refactoring incremental check-in - new VNC protocol implementation public static void main(String args[]) { if (args.length < 3) { @@ -70,7 +60,6 @@ public class VncClient { String password = args[2]; try { -<<<<<<< HEAD new VncClient(host, Integer.parseInt(port), password, false, null); } catch (NumberFormatException e) { s_logger.error("Incorrect VNC server port number: " + port + "."); @@ -84,27 +73,11 @@ public class VncClient { } catch (Throwable e) { s_logger.error("An error happened: " + e.getMessage()); System.exit(1); -======= - new VncClient(host, Integer.parseInt(port), password); - } catch (NumberFormatException e) { - SimpleLogger.error("Incorrect VNC server port number: " + port + "."); - System.exit(1); - } catch (UnknownHostException e) { - SimpleLogger.error("Incorrect VNC server host name: " + host + "."); - System.exit(1); - } catch (IOException e) { - SimpleLogger.error("Cannot communicate with VNC server: " + e.getMessage()); - System.exit(1); - } catch (Throwable e) { - SimpleLogger.error("An error happened: " + e.getMessage()); - System.exit(1); ->>>>>>> 52ebf15... 
Console proxy refactoring incremental check-in - new VNC protocol implementation } System.exit(0); } private static void printHelpMessage() { -<<<<<<< HEAD /* LOG */s_logger.info("Usage: HOST PORT PASSWORD."); } @@ -174,44 +147,6 @@ public class VncClient { } private void doConnect(String password) throws IOException { -======= - /* LOG */SimpleLogger.info("Usage: HOST PORT PASSWORD."); - } - - public VncClient(String host, int port, String password) throws UnknownHostException, IOException { - connectTo(host, port, password); - } - - void shutdown() { - sender.closeConnection(); - receiver.closeConnection(); - - try { - is.close(); - } catch (Throwable e) { - } - - try { - os.close(); - } catch (Throwable e) { - } - - try { - socket.close(); - } catch (Throwable e) { - } - - } - - public void connectTo(String host, int port, String password) throws UnknownHostException, IOException { - // If port number is too small, then interpret it as display number. - if (port < 100) - port += 5900; - - // Connect to server - SimpleLogger.info("Connecting to VNC server " + host + ":" + port + "..."); - this.socket = new Socket(host, port); ->>>>>>> 52ebf15... Console proxy refactoring incremental check-in - new VNC protocol implementation is = new DataInputStream(socket.getInputStream()); os = new DataOutputStream(socket.getOutputStream()); @@ -231,18 +166,13 @@ public class VncClient { canvas.addMouseMotionListener(sender); canvas.addKeyListener(sender); -<<<<<<< HEAD Frame frame = null; if(!noUI) frame = createVncClientMainWindow(canvas, screen.getDesktopName()); -======= - Frame frame = createVncClientMainWindow(canvas, screen.getDesktopName()); ->>>>>>> 52ebf15... 
Console proxy refactoring incremental check-in - new VNC protocol implementation new Thread(sender).start(); // Run server-to-client packet receiver -<<<<<<< HEAD receiver = new VncServerPacketReceiver(is, canvas, screen, this, sender, clientListener); try { receiver.run(); @@ -253,17 +183,6 @@ public class VncClient { } this.shutdown(); } -======= - receiver = new VncServerPacketReceiver(is, canvas, screen, this, sender); - try { - receiver.run(); - } finally { - frame.setVisible(false); - frame.dispose(); - this.shutdown(); - } - ->>>>>>> 52ebf15... Console proxy refactoring incremental check-in - new VNC protocol implementation } private Frame createVncClientMainWindow(BufferedImageCanvas canvas, String title) { @@ -452,11 +371,8 @@ public class VncClient { int framebufferWidth = is.readUnsignedShort(); int framebufferHeight = is.readUnsignedShort(); screen.setFramebufferSize(framebufferWidth, framebufferHeight); -<<<<<<< HEAD if(clientListener != null) clientListener.onFramebufferSizeChange(framebufferWidth, framebufferHeight); -======= ->>>>>>> 52ebf15... Console proxy refactoring incremental check-in - new VNC protocol implementation } // Read pixel format @@ -490,7 +406,6 @@ public class VncClient { screen.setDesktopName(desktopName); } } -<<<<<<< HEAD public FrameBufferCanvas getFrameBufferCanvas() { if(receiver != null) @@ -517,7 +432,4 @@ public class VncClient { public boolean isHostConnected() { return receiver != null && receiver.isConnectionAlive(); } -======= - ->>>>>>> 52ebf15... 
Console proxy refactoring incremental check-in - new VNC protocol implementation } diff --git a/console-proxy/src/com/cloud/consoleproxy/vnc/VncClientPacketSender.java b/console-proxy/src/com/cloud/consoleproxy/vnc/VncClientPacketSender.java index 9f628f54723..671cbd51e70 100644 --- a/console-proxy/src/com/cloud/consoleproxy/vnc/VncClientPacketSender.java +++ b/console-proxy/src/com/cloud/consoleproxy/vnc/VncClientPacketSender.java @@ -1,4 +1,3 @@ -<<<<<<< HEAD // Copyright 2012 Citrix Systems, Inc. Licensed under the // Apache License, Version 2.0 (the "License"); you may not use this // file except in compliance with the License. Citrix Systems, Inc. @@ -11,8 +10,6 @@ // limitations under the License. // // Automatically generated by addcopyright.py at 04/03/2012 -======= ->>>>>>> 52ebf15... Console proxy refactoring incremental check-in - new VNC protocol implementation package com.cloud.consoleproxy.vnc; import java.awt.event.KeyEvent; @@ -55,13 +52,10 @@ public class VncClientPacketSender implements Runnable, PaintNotificationListene sendSetEncodings(); requestFullScreenUpdate(); } -<<<<<<< HEAD public void sendClientPacket(ClientPacket packet) { queue.add(packet); } -======= ->>>>>>> 52ebf15... Console proxy refactoring incremental check-in - new VNC protocol implementation @Override public void run() { diff --git a/console-proxy/src/com/cloud/consoleproxy/vnc/VncScreenDescription.java b/console-proxy/src/com/cloud/consoleproxy/vnc/VncScreenDescription.java index c8f9adeb94e..9824c637311 100644 --- a/console-proxy/src/com/cloud/consoleproxy/vnc/VncScreenDescription.java +++ b/console-proxy/src/com/cloud/consoleproxy/vnc/VncScreenDescription.java @@ -1,4 +1,3 @@ -<<<<<<< HEAD // Copyright 2012 Citrix Systems, Inc. Licensed under the // Apache License, Version 2.0 (the "License"); you may not use this // file except in compliance with the License. Citrix Systems, Inc. @@ -11,8 +10,6 @@ // limitations under the License. 
// // Automatically generated by addcopyright.py at 04/03/2012 -======= ->>>>>>> 52ebf15... Console proxy refactoring incremental check-in - new VNC protocol implementation package com.cloud.consoleproxy.vnc; /** diff --git a/console-proxy/src/com/cloud/consoleproxy/vnc/VncServerPacketReceiver.java b/console-proxy/src/com/cloud/consoleproxy/vnc/VncServerPacketReceiver.java index 072bae53cd3..757f5acaf82 100644 --- a/console-proxy/src/com/cloud/consoleproxy/vnc/VncServerPacketReceiver.java +++ b/console-proxy/src/com/cloud/consoleproxy/vnc/VncServerPacketReceiver.java @@ -1,4 +1,3 @@ -<<<<<<< HEAD // Copyright 2012 Citrix Systems, Inc. Licensed under the // Apache License, Version 2.0 (the "License"); you may not use this // file except in compliance with the License. Citrix Systems, Inc. @@ -11,8 +10,6 @@ // limitations under the License. // // Automatically generated by addcopyright.py at 04/03/2012 -======= ->>>>>>> 52ebf15... Console proxy refactoring incremental check-in - new VNC protocol implementation package com.cloud.consoleproxy.vnc; import java.awt.Toolkit; @@ -20,19 +17,13 @@ import java.awt.datatransfer.StringSelection; import java.io.DataInputStream; import java.io.IOException; -<<<<<<< HEAD import com.cloud.consoleproxy.ConsoleProxyClientListener; import com.cloud.consoleproxy.util.Logger; -======= ->>>>>>> 52ebf15... Console proxy refactoring incremental check-in - new VNC protocol implementation import com.cloud.consoleproxy.vnc.packet.server.FramebufferUpdatePacket; import com.cloud.consoleproxy.vnc.packet.server.ServerCutText; public class VncServerPacketReceiver implements Runnable { -<<<<<<< HEAD private static final Logger s_logger = Logger.getLogger(VncServerPacketReceiver.class); -======= ->>>>>>> 52ebf15... 
Console proxy refactoring incremental check-in - new VNC protocol implementation private final VncScreenDescription screen; private BufferedImageCanvas canvas; @@ -41,29 +32,20 @@ public class VncServerPacketReceiver implements Runnable { private boolean connectionAlive = true; private VncClient vncConnection; private final FrameBufferUpdateListener fburListener; -<<<<<<< HEAD private final ConsoleProxyClientListener clientListener; public VncServerPacketReceiver(DataInputStream is, BufferedImageCanvas canvas, VncScreenDescription screen, VncClient vncConnection, FrameBufferUpdateListener fburListener, ConsoleProxyClientListener clientListener) { -======= - - public VncServerPacketReceiver(DataInputStream is, BufferedImageCanvas canvas, VncScreenDescription screen, VncClient vncConnection, - FrameBufferUpdateListener fburListener) { ->>>>>>> 52ebf15... Console proxy refactoring incremental check-in - new VNC protocol implementation this.screen = screen; this.canvas = canvas; this.is = is; this.vncConnection = vncConnection; this.fburListener = fburListener; -<<<<<<< HEAD this.clientListener = clientListener; } public BufferedImageCanvas getCanvas() { return canvas; -======= ->>>>>>> 52ebf15... Console proxy refactoring incremental check-in - new VNC protocol implementation } @Override @@ -82,11 +64,7 @@ public class VncServerPacketReceiver implements Runnable { // so it can send another frame buffer update request fburListener.frameBufferPacketReceived(); // Handle frame buffer update -<<<<<<< HEAD new FramebufferUpdatePacket(canvas, screen, is, clientListener); -======= - new FramebufferUpdatePacket(canvas, screen, is); ->>>>>>> 52ebf15... 
Console proxy refactoring incremental check-in - new VNC protocol implementation break; } @@ -103,15 +81,9 @@ public class VncServerPacketReceiver implements Runnable { default: throw new RuntimeException("Unknown server packet type: " + messageType + "."); } -<<<<<<< HEAD } } catch (Throwable e) { -======= - - } - } catch (Throwable e) { ->>>>>>> 52ebf15... Console proxy refactoring incremental check-in - new VNC protocol implementation if (connectionAlive) { closeConnection(); vncConnection.shutdown(); @@ -122,13 +94,10 @@ public class VncServerPacketReceiver implements Runnable { public void closeConnection() { connectionAlive = false; } -<<<<<<< HEAD public boolean isConnectionAlive() { return connectionAlive; } -======= ->>>>>>> 52ebf15... Console proxy refactoring incremental check-in - new VNC protocol implementation /** * Handle server bell packet. @@ -145,10 +114,6 @@ public class VncServerPacketReceiver implements Runnable { StringSelection contents = new StringSelection(clipboardContent.getContent()); Toolkit.getDefaultToolkit().getSystemClipboard().setContents(contents, null); -<<<<<<< HEAD s_logger.info("Server clipboard buffer: "+clipboardContent.getContent()); -======= - SimpleLogger.info("Server clipboard buffer: "+clipboardContent.getContent()); ->>>>>>> 52ebf15... 
Console proxy refactoring incremental check-in - new VNC protocol implementation } } diff --git a/console-proxy/src/com/cloud/consoleproxy/vnc/packet/server/FramebufferUpdatePacket.java b/console-proxy/src/com/cloud/consoleproxy/vnc/packet/server/FramebufferUpdatePacket.java index b019edb0d1c..527c12db57c 100644 --- a/console-proxy/src/com/cloud/consoleproxy/vnc/packet/server/FramebufferUpdatePacket.java +++ b/console-proxy/src/com/cloud/consoleproxy/vnc/packet/server/FramebufferUpdatePacket.java @@ -15,10 +15,7 @@ package com.cloud.consoleproxy.vnc.packet.server; import java.io.DataInputStream; import java.io.IOException; -<<<<<<< HEAD import com.cloud.consoleproxy.ConsoleProxyClientListener; -======= ->>>>>>> 52ebf15... Console proxy refactoring incremental check-in - new VNC protocol implementation import com.cloud.consoleproxy.vnc.BufferedImageCanvas; import com.cloud.consoleproxy.vnc.RfbConstants; import com.cloud.consoleproxy.vnc.VncScreenDescription; diff --git a/ui/css/cloudstack3.css b/ui/css/cloudstack3.css index 6ae384bb6eb..2a42d471136 100644 --- a/ui/css/cloudstack3.css +++ b/ui/css/cloudstack3.css @@ -530,25 +530,14 @@ body.login { -webkit-box-shadow: 0px 4px 10px #B9B9B9; -o-box-shadow: 0px 4px 10px #B9B9B9; box-shadow: 0px 4px 10px #B9B9B9; -<<<<<<< HEAD -<<<<<<< HEAD padding: 5px; -======= - padding: 0; ->>>>>>> 6d8a11c... Complete localization for quick install wizard -======= - padding: 5px; ->>>>>>> ccd7d8b... Install wizard: Conditionally load EULA /*+border-radius:4px;*/ -moz-border-radius: 4px; -webkit-border-radius: 4px; -khtml-border-radius: 4px; border-radius: 4px 4px 4px 4px; overflow: auto; -<<<<<<< HEAD overflow-x: hidden; -======= ->>>>>>> ccd7d8b... 
Install wizard: Conditionally load EULA } .install-wizard .eula-copy p { @@ -2857,7 +2846,6 @@ Dialogs*/ -webkit-text-shadow: 0px 1px 1px #FFFFFF; -o-text-shadow: 0px 1px 1px #FFFFFF; text-shadow: 0px 1px 1px #FFFFFF; - padding-bottom: 40px; } .ui-dialog span.message ul { diff --git a/ui/index.jsp b/ui/index.jsp index 2f48a4ac1b5..041a672111a 100644 --- a/ui/index.jsp +++ b/ui/index.jsp @@ -59,10 +59,7 @@ @@ -267,11 +264,7 @@
-<<<<<<< HEAD
-======= -
->>>>>>> 240dabe... Navigation organizational changes
@@ -497,23 +490,11 @@ zone-wizard-step-id="setupPhysicalNetwork" zone-wizard-prefilter="setupPhysicalNetwork">
@@ -521,12 +502,6 @@
-======= - -
- -
->>>>>>> 2f682ea... cloudstack 3.0 new UI - add zone wizard - localize text in html code.
 
@@ -616,26 +591,11 @@ zone-wizard-form="basicPhysicalNetwork" zone-wizard-prefilter="addNetscalerDevice">
@@ -648,40 +608,16 @@
-======= - - -
-<<<<<<< HEAD - ->>>>>>> 2f682ea... cloudstack 3.0 new UI - add zone wizard - localize text in html code. -======= - - ->>>>>>> a56d465... cloudstack 3.0 new UI - add zone wizard - public traffic - show different description for basic zone and advanced zone.
@@ -690,26 +626,11 @@
@@ -726,40 +647,16 @@ zone-wizard-step-id="configureGuestTraffic" zone-wizard-prefilter="configureGuestTraffic">
-======= - - -
-<<<<<<< HEAD - ->>>>>>> 2f682ea... cloudstack 3.0 new UI - add zone wizard - localize text in html code. -======= - - ->>>>>>> f0e3cad... cloudstack 3.0 new UI - add zone wizard - guest traffic - show different description for basic zone and advanced zone.
@@ -770,38 +667,15 @@
-======= -
  • Netscaler
  • -
  • Public traffic
  • -
  • Pod
  • -
  • Guest Traffic
  • -
  • Storage Traffic
  • - - -
    - Traffic between CloudStack's internal resources, including any components that communicate with the Management Server, such as hosts and CloudStack system VMs. Please configure storage traffic here. ->>>>>>> 5c06960... bug 13743: New zone wizard step -- configure storage traffic -======= - - -
    - ->>>>>>> 2f682ea... cloudstack 3.0 new UI - add zone wizard - localize text in html code.
    @@ -877,11 +751,7 @@
    -<<<<<<< HEAD
    -======= -
    ->>>>>>> 2f682ea... cloudstack 3.0 new UI - add zone wizard - localize text in html code. @@ -928,12 +798,8 @@
  • -<<<<<<< HEAD
    -======= -
    ->>>>>>> fab34ce... cloudstack 3.0 new UI - system - network chart - localize text in html code.
  • @@ -1072,24 +938,18 @@
    -<<<<<<< HEAD
      Update SSL Certificate -======= -
    - ->>>>>>> fab34ce... cloudstack 3.0 new UI - system - network chart - localize text in html code.
    -<<<<<<< HEAD
  • -======= - -
    -
  • -
      -
    • - ->>>>>>> fab34ce... cloudstack 3.0 new UI - system - network chart - localize text in html code. " view-all-target="zones">
    • -<<<<<<< HEAD -======= - ->>>>>>> fab34ce... cloudstack 3.0 new UI - system - network chart - localize text in html code. " view-all-target="pods">
    • -<<<<<<< HEAD -======= - ->>>>>>> fab34ce... cloudstack 3.0 new UI - system - network chart - localize text in html code. " view-all-target="clusters">
    • -<<<<<<< HEAD -======= - ->>>>>>> fab34ce... cloudstack 3.0 new UI - system - network chart - localize text in html code. " view-all-target="hosts">
    • -<<<<<<< HEAD
    • @@ -1165,33 +1004,6 @@ " view-all-target="systemVms"> -======= -
    -
    - - -
    -
    - -
    -
    -