diff --git a/DISCLAIMER b/DISCLAIMER new file mode 100644 index 00000000000..fa1e9261a36 --- /dev/null +++ b/DISCLAIMER @@ -0,0 +1,7 @@ +Apache CloudStack is an effort undergoing incubation at The Apache Software Foundation (ASF), +sponsored by the Apache Incubator. Incubation is required of all newly accepted +projects until a further review indicates that the infrastructure, communications, and +decision making process have stabilized in a manner consistent with other successful ASF +projects. While incubation status is not necessarily a reflection of the completeness or +stability of the code, it does indicate that the project has yet to be fully endorsed by +the ASF. diff --git a/KEYS b/KEYS index 564aa799702..b0f868529e9 100644 --- a/KEYS +++ b/KEYS @@ -140,3 +140,61 @@ uHR4OoUFg2G1Hbm7k4eb1SFT1jPEe3is4Oc8t1ORRfSBIH0FfLF1ylLFpSma5q+3 HpWraBFdP78= =I9dG -----END PGP PUBLIC KEY BLOCK----- +pub 4096R/2908DED2 2012-06-29 +uid John Kinsella +sig 3 2908DED2 2012-08-07 John Kinsella +sub 4096R/26F845B7 2012-06-29 +sig 2908DED2 2012-08-07 John Kinsella + +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG/MacGPG2 v2.0.17 (Darwin) +Comment: GPGTools - http://gpgtools.org + +mQINBE/uBAgBEAC9f6Cjh4vS1eY8g5O9rX1P6qhUWfoh8e1stAuKWVUsNfR3C4w3 +BZef4dDTMMHaXfJnZ7oFsMUghjzKI1/Fy2rhJ99ZEf8NgxYyy5nR4TUfHdlXAat0 +tF3amfGzruJoVorybFEiGVIsYcuDPVxC7jVXGgkaMZ9PD1pyD4cSGYafObDuVr5z +MM0P6X1C4dT/pShiKOBhuX4XJdGk910uEtniWHWaIHIN3KBCQL7xgw0GxRPAmoTY +GPmt1Ee2N8E+o2UzgvqAc+iQH6z1iqHakCCkH/707XscpUrr00bPHk92lgQrzGOw +kmXUdTipAM9wqzPZ6EmtT3WV9uT8HV608VTEvZSHuuYpZdFW4IwWXziZUqx6z8EX +miKlFChHIkeARZgmxdIB3m4r05yU2MG/A4VaixcNGOXAjSaV+EyWXqecMlGJXLbF +rnaGfRshOo3mLG2UE/LI/y/4S0RYVlky0LzWZqihcfL/sT2Tc4OLAN8wKOXhlwd2 +s/68wmzOq+67KT84YTxsixUbS2yBg8nfS6SMz5irWvlELQyeiPkDttuzDxSC8Koz +jR28az1VVkqT88VrRtb3oVyV2T7Za/yYHO/IsrjimgvIA1BKnq6E+0uXZbI5HKkA +/FGTP9N9J3YwW4eFBilXIt47OIkuBgHUwZsBMpLZfWNktLgB2nAIcz2VFQARAQAB +tCBKb2huIEtpbnNlbGxhIDxqbGtAc3RyYXRvc2VjLmNvPokCNwQTAQIAIQIbAwIe +AQIXgAULCQgHAwUVCgkICwUWAgMBAAUCUCCB1QAKCRDqJvTdKQje0hoMD/9Ssbjf +XF3V6of8563Ro961TrU38E7CLjrA8mrwVHllz9ikoXDhXgMfFg7WrtzEs/EHw7xW +iYwJxS2R1mKyu4zP4Qx38TnH++DsLx6n8m5L2uhaMlZCdqaaXm0nWgu1L4ZQv6OR +6BmVnEged98rsIuOfdXqxbe+vxx7kmXxQnBnRIGOfCKce5kqr/uLKFCBTQdKo0Va +WxXwa/2b0MpN7XEollY1O20185wQXxpe7/6k55wi6ZDUiIw7pollMnSNAj/Ic4Cr +CGj5MGzc4uLnRpIjjbfUif0CRfQ8x3s++IR4KDGZbLfLkUAcHrKGV720TEmf3Ym5 +EvSg20M6mbyOGNUlXdZ69aQAkhCTVwbYNC0E83KsV0K48o538SyhnYzQvSnyqHNv +AMYwRXu+9m3lRmO9FqZ69Qm+fap+QUWlEmYZFNzmhH8F5WWC6EqN0e6JDt4RwDlc +taHF9mSpQYLipsD3yfr5tzd4J9AIWItfEcuaKG2r5kVTyUZMp1yu2+ByvnfGna2R +dHJLwCKGvowlTfrcQ/+ic56YEQrIe4Sy7zbsFbKlOzVNoQyk9814kF4My7nzDPwg +M9qwfcW84kQZzh47uYFVz6BDNcDcIUlo6ODGMHs2MM6Oqxo+NfruXwfKZeHIWsvw +CDuqPNRN5oIUK6txqrYr7nj0GUj04W5LltztvbkCDQRP7gQIARAAykm2inv3OUIX +/3KnGeQYluoYa8cWv4lBV/F1x19qcCgpn2GtZFrwm8/1lLUIRHBsxardE36sMCme +bGilXSyH/Him8gHTn3t/i4jy0EWNcBU5B6C0hfG0DZBGvYjxWA22wRxr0x5CReoa +nZYq3lfLSzHjRbbAlZo9hYp2PpOrsPGGYSMWasANIODQ5Ium97TEWm8NyVBX8tdO +jYz3SCR27I1UTPII7iOhrDuWVqV2orBgDcOlMrIdHN1vg5YKWTU4VqTn0gr2Py02 +iB+bW2eENnG8BYNeL+CBrG7guwsvFvNWlN3KbiSdN360qzYmLly5jmIH6baLIGS/ +nCgKPo91r5YY59fM9OxiT8hi/5nidfyy/HrqAp5IO5E9WdjTrBrMpDAm/oWfy/He +8gHjbcuX2bUp0UFgA4bo7ElTEN7clCU6AjX+g+mvGAvzJZOZ/t6jf8bfsd8G1FgU +ND47WPCCKfJyrnbaqAh4chnzsuh2L5Ujrk8l4Y8X50zstybwpPqk60Rednw0N2kh +tcLnMkvNqy6Vmvi0uBkxVonVJi3S/FOc+DJreQwqkc/+vrY+zW0+F3qrtTeP+uWQ +IyeW0wMuZPqXrnfRkoui5BVDzI/CEoWyyTKa2j6CtDGWnUqtIig2BHk0Ux80L0Gy +fBqqeuE5qVnjmPHBagOUsX1qwJ45/+8AEQEAAYkCHwQYAQIACQIbDAUCUCCELgAK 
+CRDqJvTdKQje0rfmEACqzADegNqY3ds2yyWz2SO/3Ihwsq8UX7n2WHPJdVhcAyzw +Xn463n+5iXYdIGhSeNd14hIHVyab3nZVY2C4Cd1IAK5QUSVkK8tcwKlPM8gHUVSu +ZUx1FBjjDBz5/EThV/f7N9bBrKtJN0DkzzqnGoNbpSsoP+CTk7kxeRmhXlK8lrr/ +ekVt6gtqi2y+sqwWfJXN955oy5aT2c+bQFsBOoKMt/bpLEDD7giVXgKfKJ6+X/Qe +3jW36aPxtW26TTXUBZr6FmhBwmXyCt6tv+/5VeP5R68CK8q10EYDGgjcWmsa8wrr +Xe2ILFA3oUMytXGp3+WbT1MUXDaHUhE+PCugGwyMPw+pXf+ADZMIjESa2lk+mmE4 +Worss4fFHIpXICMkJKRo7P9NAPUMC6u34EMuNEv4XfBd/zalTpAq/vvy6cMPaeAa +Iik1ML6E8YZ3TeKbQoZ++ZQT/MEwSOjrsx61a4yf/bzLvcqppKBxVdtCBsePPgHA +bOXIAKv3iqD9Cq5GxLpSAH9E+KlQpJmVDsR1b15G6jceGyZ20NP2Mf6O8pqY1qfC +p7S7eSiSwAZT56oHx9ULzBAKYvwyyoIVOnc0ddwhLeGq2flP7xSo54oDw4KRlFuu +rHMalutU5/Bc0tFZtdm6DnSnwtg3fBnwgFBeINJCc31xuX474+071mfaQUVO7g== +=BzOC +-----END PGP PUBLIC KEY BLOCK----- diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000000..bf90869e0a5 --- /dev/null +++ b/LICENSE @@ -0,0 +1,207 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +============================ End of Apache License file V 2.0 =================== + +Third party licenses +==================== + diff --git a/NOTICE b/NOTICE new file mode 100644 index 00000000000..8a3b0bf2494 --- /dev/null +++ b/NOTICE @@ -0,0 +1,8 @@ +Apache CloudStack +Copyright 2012 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +This product includes JUnit (http://www.junit.org/) +under the Common Public License Version 1.0: http://www.opensource.org/licenses/cpl.php diff --git a/api/src/com/cloud/network/element/NetworkElement.java b/api/src/com/cloud/network/element/NetworkElement.java index 3fdb094d9bb..ec8e7bce2e8 100644 --- a/api/src/com/cloud/network/element/NetworkElement.java +++ b/api/src/com/cloud/network/element/NetworkElement.java @@ -18,6 +18,7 @@ package com.cloud.network.element; import java.util.List; import java.util.Map; +import java.util.Set; import com.cloud.deploy.DeployDestination; import com.cloud.exception.ConcurrentOperationException; @@ -137,5 +138,5 @@ public interface NetworkElement extends Adapter { * @param services * @return true/false */ - boolean verifyServicesCombination(List services); + boolean verifyServicesCombination(Set services); } diff --git a/api/src/com/cloud/user/UserContext.java b/api/src/com/cloud/user/UserContext.java index b140728e596..ed33e88d027 100644 --- a/api/src/com/cloud/user/UserContext.java +++ b/api/src/com/cloud/user/UserContext.java @@ -16,14 +16,11 @@ // under the License. 
package com.cloud.user; -import com.cloud.server.ManagementService; -import com.cloud.utils.component.ComponentLocator; public class UserContext { private static ThreadLocal s_currentContext = new ThreadLocal(); - private static final ComponentLocator locator = ComponentLocator.getLocator(ManagementService.Name); - private static final AccountService _accountMgr = locator.getManager(AccountService.class); + private static UserContext s_adminContext = null; private long userId; private String sessionId; @@ -33,9 +30,7 @@ public class UserContext { private String eventDetails; private boolean apiServer; - - private static UserContext s_adminContext = new UserContext(_accountMgr.getSystemUser().getId(), _accountMgr.getSystemAccount(), null, false); - + public UserContext() { } @@ -134,4 +129,10 @@ public class UserContext { public String getEventDetails() { return eventDetails; } + + public static synchronized void setAdminContext(UserContext adminContext) { + if (UserContext.s_adminContext == null) { + UserContext.s_adminContext = adminContext; + } + } } diff --git a/awsapi/.classpath b/awsapi/.classpath index 4dc46c4b1ec..03d1a1fc3e9 100644 --- a/awsapi/.classpath +++ b/awsapi/.classpath @@ -1,22 +1,4 @@ - diff --git a/build/build-cloud.xml b/build/build-cloud.xml index b4d120ff2d8..77316b62b90 100755 --- a/build/build-cloud.xml +++ b/build/build-cloud.xml @@ -480,7 +480,7 @@ - + diff --git a/build/developer.xml b/build/developer.xml index 2d5f2db5dcc..d52b6823758 100755 --- a/build/developer.xml +++ b/build/developer.xml @@ -241,7 +241,7 @@ - + diff --git a/docs/en-US/Book_Info.xml b/docs/en-US/Book_Info.xml index 6ff42f28bd1..cdb8e21580f 100644 --- a/docs/en-US/Book_Info.xml +++ b/docs/en-US/Book_Info.xml @@ -4,6 +4,7 @@ %BOOK_ENTITIES; ]> + +
Enabling or Disabling Static NAT If port forwarding rules are already in effect for an IP address, you cannot enable static NAT to that IP. If a guest VM is part of more than one network, static NAT rules will function only if they are defined on the default network. - Log in to the CloudPlatform UI as an administrator or end user. + Log in to the &PRODUCT; UI as an administrator or end user. In the left navigation, choose Network. Click the name of the network where you want to work with. Click View IP Addresses. diff --git a/docs/en-US/enable-security-groups.xml b/docs/en-US/enable-security-groups.xml index b97b65e7922..f2e07b3114d 100644 --- a/docs/en-US/enable-security-groups.xml +++ b/docs/en-US/enable-security-groups.xml @@ -1,8 +1,27 @@ - %BOOK_ENTITIES; ]> + + +
Enabling Security Groups In order for security groups to function in a zone, the security groups feature must first be enabled for the zone. The administrator can do this when creating a new zone, by selecting a network offering that includes security groups. The procedure is described in Basic Zone Configuration in the Advanced Installation Guide. diff --git a/docs/en-US/enabling-api-call-expiration.xml b/docs/en-US/enabling-api-call-expiration.xml index 9ebbb208033..dd0d9ceccfc 100644 --- a/docs/en-US/enabling-api-call-expiration.xml +++ b/docs/en-US/enabling-api-call-expiration.xml @@ -1,3 +1,27 @@ + + +%BOOK_ENTITIES; +]> + + +
Enabling API Call Expiration diff --git a/docs/en-US/enabling-port-8096.xml b/docs/en-US/enabling-port-8096.xml index 55214399437..9609ee40523 100644 --- a/docs/en-US/enabling-port-8096.xml +++ b/docs/en-US/enabling-port-8096.xml @@ -1,3 +1,27 @@ + + +%BOOK_ENTITIES; +]> + + +
Enabling Port 8096 diff --git a/docs/en-US/end-user-ui-overview.xml b/docs/en-US/end-user-ui-overview.xml index 879485cbe28..9c52720d69e 100644 --- a/docs/en-US/end-user-ui-overview.xml +++ b/docs/en-US/end-user-ui-overview.xml @@ -1,8 +1,27 @@ - %BOOK_ENTITIES; ]> + + +
End User's UI Overview The &PRODUCT; UI helps users of cloud infrastructure to view and use their cloud resources, including virtual machines, templates and ISOs, data volumes and snapshots, guest networks, and IP addresses. If the user is a member or administrator of one or more &PRODUCT; projects, the UI can provide a project-oriented view. diff --git a/docs/en-US/error-handling.xml b/docs/en-US/error-handling.xml index 1b2684f3413..869e964850e 100644 --- a/docs/en-US/error-handling.xml +++ b/docs/en-US/error-handling.xml @@ -1,6 +1,29 @@ + + +%BOOK_ENTITIES; +]> + + +
Error Handling If an error occurs while processing an API request, the appropriate response in the format specified is returned. Each error response consists of an error code and an error text describing what possibly can go wrong. For an example error response, see page 12. An HTTP error code of 401 is always returned if API request was rejected due to bad signatures, missing API Keys, or the user simply did not have the permissions to execute the command.
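For illustration, a failed call of the kind described above returns a body carrying the errorcode and errortext fields named in that description; a hedged example in JSON form, with the command name and message text as placeholders rather than output captured from a real system:

    { "deployvirtualmachineresponse" : {
        "errorcode"  : 431,
        "errortext"  : "Unable to execute API command deployvirtualmachine due to missing parameter serviceofferingid"
    } }

Clients can branch on the HTTP status first (401 for rejected signatures, missing API keys, or insufficient permissions) and then on errorcode/errortext for command-level failures.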
- diff --git a/docs/en-US/event-log-queries.xml b/docs/en-US/event-log-queries.xml index b425ce0dfec..1eb449783fc 100644 --- a/docs/en-US/event-log-queries.xml +++ b/docs/en-US/event-log-queries.xml @@ -1,8 +1,27 @@ - %BOOK_ENTITIES; ]> + + +
Event Log Queries Database logs can be queried from the user interface. The list of events captured by the system includes: diff --git a/docs/en-US/event-types.xml b/docs/en-US/event-types.xml index f9570b1450f..7be69706529 100644 --- a/docs/en-US/event-types.xml +++ b/docs/en-US/event-types.xml @@ -1,3 +1,27 @@ + + +%BOOK_ENTITIES; +]> + + +
Event Types @@ -193,4 +217,4 @@ -
\ No newline at end of file +
diff --git a/docs/en-US/events-log.xml b/docs/en-US/events-log.xml index 4087e58bbf4..5db856a8b7f 100644 --- a/docs/en-US/events-log.xml +++ b/docs/en-US/events-log.xml @@ -1,10 +1,28 @@ - %BOOK_ENTITIES; ]> + + +
Event Logs - There are two types of events logged in the CloudPlatform Event Log. Standard events log the success or failure of an event and can be used to identify jobs or processes that have failed. There are also long running job events. Events for asynchronous jobs log when a job is scheduled, when it starts, and when it completes. Other long running synchronous jobs log when a job starts, and when it completes. Long running synchronous and asynchronous event logs can be used to gain more information on the status of a pending job or can be used to identify a job that is hanging or has not started. The following sections provide more information on these events.. + There are two types of events logged in the &PRODUCT; Event Log. Standard events log the success or failure of an event and can be used to identify jobs or processes that have failed. There are also long running job events. Events for asynchronous jobs log when a job is scheduled, when it starts, and when it completes. Other long running synchronous jobs log when a job starts, and when it completes. Long running synchronous and asynchronous event logs can be used to gain more information on the status of a pending job or can be used to identify a job that is hanging or has not started. The following sections provide more information on these events.
- diff --git a/docs/en-US/events.xml b/docs/en-US/events.xml index b1a67c13201..9d672ff4c0d 100644 --- a/docs/en-US/events.xml +++ b/docs/en-US/events.xml @@ -1,8 +1,27 @@ - %BOOK_ENTITIES; ]> + + +
diff --git a/docs/en-US/example-LDAP-configuration-commands.xml b/docs/en-US/example-LDAP-configuration-commands.xml index a91a9094ae5..e557386c3ca 100644 --- a/docs/en-US/example-LDAP-configuration-commands.xml +++ b/docs/en-US/example-LDAP-configuration-commands.xml @@ -1,11 +1,30 @@ - %BOOK_ENTITIES; ]> + + +
Example LDAP Configuration Commands - To understand the examples in this section, you need to know the basic concepts behind calling the CloudPlatform API, which are explained in the Developer’s Guide. + To understand the examples in this section, you need to know the basic concepts behind calling the &PRODUCT; API, which are explained in the Developer’s Guide. The following shows an example invocation of ldapConfig with an ApacheDS LDAP server http://127.0.0.1:8080/client/api?command=ldapConfig&hostname=127.0.0.1&searchbase=ou%3Dtesting%2Co%3Dproject&queryfilter=%28%26%28uid%3D%25u%29%29&binddn=cn%3DJohn+Singh%2Cou%3Dtesting%2Co%project&bindpass=secret&port=10389&ssl=true&truststore=C%3A%2Fcompany%2Finfo%2Ftrusted.ks&truststorepass=secret&response=json&apiKey=YourAPIKey&signature=YourSignatureHash The command must be URL-encoded. Here is the same example without the URL encoding: diff --git a/docs/en-US/example-response-from-listUsageRecords.xml b/docs/en-US/example-response-from-listUsageRecords.xml index 96615bb0f52..8ed752de55c 100644 --- a/docs/en-US/example-response-from-listUsageRecords.xml +++ b/docs/en-US/example-response-from-listUsageRecords.xml @@ -1,7 +1,31 @@ + + +%BOOK_ENTITIES; +]> + + +
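Decoding the query string of the ldapConfig call shown above gives the following parameters (line breaks added for readability; the stray "o%project" in the binddn appears intended to be o=project):

    command=ldapConfig
    hostname=127.0.0.1
    searchbase=ou=testing,o=project
    queryfilter=(&(uid=%u))
    binddn=cn=John Singh,ou=testing,o=project
    bindpass=secret
    port=10389
    ssl=true
    truststore=C:/company/info/trusted.ks
    truststorepass=secret
    response=json
    apiKey=YourAPIKey
    signature=YourSignatureHash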
Example response from listUsageRecords - All CloudStack API requests are submitted in the form of a HTTP GET/POST with an associated command and any parameters. A request is composed of the following whether in HTTP or HTTPS: + All &PRODUCT; API requests are submitted in the form of a HTTP GET/POST with an associated command and any parameters. A request is composed of the following whether in HTTP or HTTPS: <listusagerecordsresponse> diff --git a/docs/en-US/export-template.xml b/docs/en-US/export-template.xml index c6db53f179f..05df2c0974b 100644 --- a/docs/en-US/export-template.xml +++ b/docs/en-US/export-template.xml @@ -1,10 +1,29 @@ - %BOOK_ENTITIES; ]> + + +
Exporting Templates - End users and Administrators may export templates from the CloudPlatform. Navigate to the template in the UI and choose the Download function from the Actions menu. + End users and Administrators may export templates from the &PRODUCT;. Navigate to the template in the UI and choose the Download function from the Actions menu.
diff --git a/docs/en-US/external-firewalls-and-load-balancers.xml b/docs/en-US/external-firewalls-and-load-balancers.xml index 239e7b8f584..1452804885d 100644 --- a/docs/en-US/external-firewalls-and-load-balancers.xml +++ b/docs/en-US/external-firewalls-and-load-balancers.xml @@ -1,11 +1,28 @@ - %BOOK_ENTITIES; ]> + + +
External Firewalls and Load Balancers - CloudPlatform is capable of replacing its Virtual Router with an external Juniper SRX device and an optional external NetScaler or F5 load balancer for gateway and load balancing services. In this case, the VMs use the SRX as their gateway. - - -
+ &PRODUCT; is capable of replacing its Virtual Router with an external Juniper SRX device and an optional external NetScaler or F5 load balancer for gateway and load balancing services. In this case, the VMs use the SRX as their gateway. +
diff --git a/docs/en-US/feature-overview.xml b/docs/en-US/feature-overview.xml index 9ea1b783229..7413e43edc6 100644 --- a/docs/en-US/feature-overview.xml +++ b/docs/en-US/feature-overview.xml @@ -1,8 +1,27 @@ - %BOOK_ENTITIES; ]> + + +
What Can &PRODUCT; Do? @@ -36,7 +55,7 @@ API and Extensibility - &PRODUCT; provides an API that gives programmatic access to all the management features available in the UI. The API is maintained and documented. This API enables the creation of command line tools and new user interfaces to suit particular needs. See the Developer’s Guide and API Reference, both available at http://docs.cloud.com/CloudStack_Documentation. + &PRODUCT; provides an API that gives programmatic access to all the management features available in the UI. The API is maintained and documented. This API enables the creation of command line tools and new user interfaces to suit particular needs. See the Developer’s Guide and API Reference, both available at http://docs.cloudstack.org/Apache_CloudStack_Documentation. The &PRODUCT; pluggable allocation architecture allows the creation of new types of allocators for the selection of storage and Hosts. See the Allocator Implementation Guide (http://docs.cloudstack.org/CloudStack_Documentation/Allocator_Implementation_Guide). @@ -46,4 +65,4 @@ &PRODUCT; has a number of features to increase the availability of the system. The Management Server itself may be deployed in a multi-node installation where the servers are load balanced. MySQL may be configured to use replication to provide for a manual failover in the event of database loss. For the hosts, &PRODUCT; supports NIC bonding and the use of separate networks for storage as well as iSCSI Multipath. -
\ No newline at end of file +
diff --git a/docs/en-US/firewall-rules.xml b/docs/en-US/firewall-rules.xml index a7a708dd4c7..59231515633 100644 --- a/docs/en-US/firewall-rules.xml +++ b/docs/en-US/firewall-rules.xml @@ -1,16 +1,35 @@ - %BOOK_ENTITIES; ]> + + +
Firewall Rules By default, all incoming traffic to the public IP address is rejected by the firewall. To allow external traffic, you can open firewall ports by specifying firewall rules. You can optionally specify one or more CIDRs to filter the source IPs. This is useful when you want to allow only incoming requests from certain IP addresses. You cannot use firewall rules to open ports for an elastic IP address. When elastic IP is used, outside access is instead controlled through the use of security groups. See . - Firewall rules can be created using the Firewall tab in the Management Server UI. This tab is not displayed by default when CloudPlatform is installed. To display the Firewall tab, the CloudPlatform administrator must set the global configuration parameter firewall.rule.ui.enabled to "true." + Firewall rules can be created using the Firewall tab in the Management Server UI. This tab is not displayed by default when &PRODUCT; is installed. To display the Firewall tab, the &PRODUCT; administrator must set the global configuration parameter firewall.rule.ui.enabled to "true." To create a firewall rule: - Log in to the CloudPlatform UI as an administrator or end user. + Log in to the &PRODUCT; UI as an administrator or end user. In the left navigation, choose Network. Click the name of the network where you want to work with. Click View IP Addresses. diff --git a/docs/en-US/first_ms_node_install.xml b/docs/en-US/first_ms_node_install.xml index f0c2e142f1b..ea9c2f3b45e 100644 --- a/docs/en-US/first_ms_node_install.xml +++ b/docs/en-US/first_ms_node_install.xml @@ -1,7 +1,31 @@ + + +%BOOK_ENTITIES; +]> + + +
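For the firewall rule procedure above, the same rule can also be created through the API rather than the UI; a hedged sketch using the createFirewallRule command, where the management server address, public IP address ID, and source CIDR are placeholders:

    http://<management-server>:8080/client/api?command=createFirewallRule&ipaddressid=<public-ip-id>&protocol=TCP&startport=80&endport=80&cidrlist=192.168.1.0/24&apiKey=YourAPIKey&signature=YourSignatureHash

As with the UI, omitting cidrlist leaves the rule open to all source addresses for the specified port range.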
Install the First Management Server - Download the CloudStack Management Server onto the host where it will run from one of the following links. If your operating system is CentOS, use the download file for RHEL. + Download the &PRODUCT; Management Server onto the host where it will run from one of the following links. If your operating system is CentOS, use the download file for RHEL. Open-source community: http://sourceforge.net/projects/cloudstack/files/CloudStack Acton/ Commercial customers: https://www.citrix.com/English/ss/downloads/. @@ -9,7 +33,7 @@ - Install the CloudStack packages. You should have a file in the form of “CloudStack-VERSION-N-OSVERSION.tar.gz”. Untar the file and then run the install.sh script inside it. Replace the file and directory names below with those you are using: + Install the &PRODUCT; packages. You should have a file in the form of “CloudStack-VERSION-N-OSVERSION.tar.gz”. Untar the file and then run the install.sh script inside it. Replace the file and directory names below with those you are using: # tar xzf CloudStack-VERSION-N-OSVERSION.tar.gz # cd CloudStack-VERSION-N-OSVERSION # ./install.sh @@ -31,4 +55,4 @@ Continue to Install and Configure the Database. -
\ No newline at end of file +
diff --git a/docs/en-US/globally-configured-limit.xml b/docs/en-US/globally-configured-limit.xml index 5c8bb853db5..95d8895f570 100644 --- a/docs/en-US/globally-configured-limit.xml +++ b/docs/en-US/globally-configured-limit.xml @@ -1,8 +1,27 @@ - %BOOK_ENTITIES; ]> + + +
Globally Configured Limits In a zone, the guest virtual network has a 24 bit CIDR by default. This limits the guest virtual network to 254 running instances. It can be adjusted as needed, but this must be done before any instances are created in the zone. For example, 10.1.1.0/22 would provide for ~1000 addresses. @@ -77,5 +96,5 @@ - To modify global configuration parameters, use the global configuration screen in the CloudPlatform UI. See Setting Global Configuration Parameters + To modify global configuration parameters, use the global configuration screen in the &PRODUCT; UI. See Setting Global Configuration Parameters
diff --git a/docs/en-US/globally-configured-limits.xml b/docs/en-US/globally-configured-limits.xml index 5c8bb853db5..95d8895f570 100644 --- a/docs/en-US/globally-configured-limits.xml +++ b/docs/en-US/globally-configured-limits.xml @@ -1,8 +1,27 @@ - %BOOK_ENTITIES; ]> + + +
Globally Configured Limits In a zone, the guest virtual network has a 24 bit CIDR by default. This limits the guest virtual network to 254 running instances. It can be adjusted as needed, but this must be done before any instances are created in the zone. For example, 10.1.1.0/22 would provide for ~1000 addresses. @@ -77,5 +96,5 @@ - To modify global configuration parameters, use the global configuration screen in the CloudPlatform UI. See Setting Global Configuration Parameters + To modify global configuration parameters, use the global configuration screen in the &PRODUCT; UI. See Setting Global Configuration Parameters
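The instance counts quoted in the two sections above follow directly from the CIDR prefix lengths:

    /24 network: 2^(32-24) = 256 addresses, of which 254 are usable for running instances
    /22 network: 2^(32-22) = 1024 addresses, roughly 1022 usable (the "~1000" figure above)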
diff --git a/docs/en-US/guest-ip-ranges.xml b/docs/en-US/guest-ip-ranges.xml index 97da0448a03..08267a8c1d8 100644 --- a/docs/en-US/guest-ip-ranges.xml +++ b/docs/en-US/guest-ip-ranges.xml @@ -1,8 +1,27 @@ - %BOOK_ENTITIES; ]> + + +
Guest IP Ranges The IP ranges for guest network traffic are set on a per-account basis by the user. This allows the users to configure their network in a fashion that will enable VPN linking between their guest network and their clients. diff --git a/docs/en-US/guest-network.xml b/docs/en-US/guest-network.xml index e8264b3ab73..c9dee8032e5 100644 --- a/docs/en-US/guest-network.xml +++ b/docs/en-US/guest-network.xml @@ -1,10 +1,29 @@ - %BOOK_ENTITIES; ]> + + +
Guest Network - In a CloudPlatform cloud, guest VMs can communicate with each other using shared infrastructure with the security and user perception that the guests have a private LAN. - The CloudPlatform virtual router is the main component providing networking features for guest traffic. + In a &PRODUCT; cloud, guest VMs can communicate with each other using shared infrastructure with the security and user perception that the guests have a private LAN. + The &PRODUCT; virtual router is the main component providing networking features for guest traffic.
diff --git a/docs/en-US/guest-traffic.xml b/docs/en-US/guest-traffic.xml index 38658e05812..57a441ad816 100644 --- a/docs/en-US/guest-traffic.xml +++ b/docs/en-US/guest-traffic.xml @@ -1,8 +1,27 @@ - %BOOK_ENTITIES; ]> + + +
Guest Traffic A network can carry guest traffic only between VMs within one zone. Virtual machines in different zones cannot communicate with each other using their IP addresses; they must communicate with each other by routing through a public IP address. diff --git a/docs/en-US/ha-enabled-vm.xml b/docs/en-US/ha-enabled-vm.xml index 63faa82b63c..19666a4db27 100644 --- a/docs/en-US/ha-enabled-vm.xml +++ b/docs/en-US/ha-enabled-vm.xml @@ -1,10 +1,29 @@ - %BOOK_ENTITIES; ]> + + +
HA-Enabled Virtual Machines - The user can specify a virtual machine as HA-enabled. By default, all virtual router VMs and Elastic Load Balancing VMs are automatically configured as HA-enabled. When an HA-enabled VM crashes, CloudPlatform detects the crash and restarts the VM automatically within the same Availability Zone. HA is never performed across different Availability Zones. CloudPlatform has a conservative policy towards restarting VMs and ensures that there will never be two instances of the same VM running at the same time. The Management Server attempts to start the VM on another Host in the same cluster. + The user can specify a virtual machine as HA-enabled. By default, all virtual router VMs and Elastic Load Balancing VMs are automatically configured as HA-enabled. When an HA-enabled VM crashes, &PRODUCT; detects the crash and restarts the VM automatically within the same Availability Zone. HA is never performed across different Availability Zones. &PRODUCT; has a conservative policy towards restarting VMs and ensures that there will never be two instances of the same VM running at the same time. The Management Server attempts to start the VM on another Host in the same cluster. HA features work with iSCSI or NFS primary storage. HA with local storage is not supported. -
\ No newline at end of file +
diff --git a/docs/en-US/ha-for-hosts.xml b/docs/en-US/ha-for-hosts.xml index f555c3e8c41..e395d22e58a 100644 --- a/docs/en-US/ha-for-hosts.xml +++ b/docs/en-US/ha-for-hosts.xml @@ -1,10 +1,29 @@ - %BOOK_ENTITIES; ]> + + +
HA for Hosts - The user can specify a virtual machine as HA-enabled. By default, all virtual router VMs and Elastic Load Balancing VMs are automatically configured as HA-enabled. When an HA-enabled VM crashes, CloudPlatform detects the crash and restarts the VM automatically within the same Availability Zone. HA is never performed across different Availability Zones. CloudPlatform has a conservative policy towards restarting VMs and ensures that there will never be two instances of the same VM running at the same time. The Management Server attempts to start the VM on another Host in the same cluster. + The user can specify a virtual machine as HA-enabled. By default, all virtual router VMs and Elastic Load Balancing VMs are automatically configured as HA-enabled. When an HA-enabled VM crashes, &PRODUCT; detects the crash and restarts the VM automatically within the same Availability Zone. HA is never performed across different Availability Zones. &PRODUCT; has a conservative policy towards restarting VMs and ensures that there will never be two instances of the same VM running at the same time. The Management Server attempts to start the VM on another Host in the same cluster. HA features work with iSCSI or NFS primary storage. HA with local storage is not supported. -
\ No newline at end of file +
diff --git a/docs/en-US/ha-management-server.xml b/docs/en-US/ha-management-server.xml index 27019cc091b..1afebce3bf3 100644 --- a/docs/en-US/ha-management-server.xml +++ b/docs/en-US/ha-management-server.xml @@ -1,11 +1,30 @@ - %BOOK_ENTITIES; ]> + + +
HA for Management Server - The CloudPlatform Management Server should be deployed in a multi-node configuration such that it is not susceptible to individual server failures. The Management Server itself (as distinct from the MySQL database) is stateless and may be placed behind a load balancer. + The &PRODUCT; Management Server should be deployed in a multi-node configuration such that it is not susceptible to individual server failures. The Management Server itself (as distinct from the MySQL database) is stateless and may be placed behind a load balancer. Normal operation of Hosts is not impacted by an outage of all Management Serves. All guest VMs will continue to work. When the Management Server is down, no new VMs can be created, and the end user and admin UI, API, dynamic load distribution, and HA will cease to work.
diff --git a/docs/en-US/host-add.xml b/docs/en-US/host-add.xml index 7591ee5b697..e86760aca43 100644 --- a/docs/en-US/host-add.xml +++ b/docs/en-US/host-add.xml @@ -1,9 +1,28 @@ - %BOOK_ENTITIES; ]> + + +
Adding a Host TODO -
\ No newline at end of file +
diff --git a/docs/en-US/host-allocation.xml b/docs/en-US/host-allocation.xml index 8a56322f839..8a362e6e99c 100644 --- a/docs/en-US/host-allocation.xml +++ b/docs/en-US/host-allocation.xml @@ -1,12 +1,31 @@ - %BOOK_ENTITIES; ]> + + +
Host Allocation The system automatically picks the most appropriate host to run each virtual machine. End users may specify the zone in which the virtual machine will be created. End users do not have control over which host will run the virtual machine instance. - CloudPlatform administrators can specify that certain hosts should have a preference for particular types of guest instances. For example, an administrator could state that a host should have a preference to run Windows guests. The default host allocator will attempt to place guests of that OS type on such hosts first. If no such host is available, the allocator will place the instance wherever there is sufficient physical capacity. - Both vertical and horizontal allocation is allowed. Vertical allocation consumes all the resources of a given host before allocating any guests on a second host. This reduces power consumption in the cloud. Horizontal allocation places a guest on each host in a round-robin fashion. This may yield better performance to the guests in some cases. CloudPlatform also allows an element of CPU over-provisioning as configured by the administrator. Over-provisioning allows the administrator to commit more CPU cycles to the allocated guests than are actually available from the hardware. - CloudPlatform also provides a pluggable interface for adding new allocators. These custom allocators can provide any policy the administrator desires. -
\ No newline at end of file + &PRODUCT; administrators can specify that certain hosts should have a preference for particular types of guest instances. For example, an administrator could state that a host should have a preference to run Windows guests. The default host allocator will attempt to place guests of that OS type on such hosts first. If no such host is available, the allocator will place the instance wherever there is sufficient physical capacity. + Both vertical and horizontal allocation is allowed. Vertical allocation consumes all the resources of a given host before allocating any guests on a second host. This reduces power consumption in the cloud. Horizontal allocation places a guest on each host in a round-robin fashion. This may yield better performance to the guests in some cases. &PRODUCT; also allows an element of CPU over-provisioning as configured by the administrator. Over-provisioning allows the administrator to commit more CPU cycles to the allocated guests than are actually available from the hardware. + &PRODUCT; also provides a pluggable interface for adding new allocators. These custom allocators can provide any policy the administrator desires. +
diff --git a/docs/en-US/hypervisor-support-for-primarystorage.xml b/docs/en-US/hypervisor-support-for-primarystorage.xml index e0fa56b6159..7c547a6683e 100644 --- a/docs/en-US/hypervisor-support-for-primarystorage.xml +++ b/docs/en-US/hypervisor-support-for-primarystorage.xml @@ -1,8 +1,27 @@ - %BOOK_ENTITIES; ]> + + +
Hypervisor Support for Primary Storage The following table shows storage options and parameters for different hypervisors. @@ -74,10 +93,10 @@ - XenServer uses a clustered LVM system to store VM images on iSCSI and Fiber Channel volumes and does not support over-provisioning in the hypervisor. The storage server itself, however, can support thin-provisioning. As a result the CloudPlatform can still support storage over-provisioning by running on thin-provisioned storage volumes. - KVM supports "Shared Mountpoint" storage. A shared mountpoint is a file system path local to each server in a given cluster. The path must be the same across all Hosts in the cluster, for example /mnt/primary1. This shared mountpoint is assumed to be a clustered filesystem such as OCFS2. In this case the CloudPlatform does not attempt to mount or unmount the storage as is done with NFS. The CloudPlatform requires that the administrator insure that the storage is available - Oracle VM supports both iSCSI and NFS storage. When iSCSI is used with OVM, the CloudPlatform administrator is responsible for setting up iSCSI on the host, including re-mounting the storage after the host recovers from a failure such as a network outage. With other hypervisors, CloudPlatform takes care of mounting the iSCSI target on the host whenever it discovers a connection with an iSCSI server and unmounting the target when it discovers the connection is down. - With NFS storage, CloudPlatform manages the overprovisioning. In this case the global configuration parameter storage.overprovisioning.factor controls the degree of overprovisioning. This is independent of hypervisor type. + XenServer uses a clustered LVM system to store VM images on iSCSI and Fiber Channel volumes and does not support over-provisioning in the hypervisor. The storage server itself, however, can support thin-provisioning. As a result the &PRODUCT; can still support storage over-provisioning by running on thin-provisioned storage volumes. + KVM supports "Shared Mountpoint" storage. A shared mountpoint is a file system path local to each server in a given cluster. The path must be the same across all Hosts in the cluster, for example /mnt/primary1. This shared mountpoint is assumed to be a clustered filesystem such as OCFS2. In this case the &PRODUCT; does not attempt to mount or unmount the storage as is done with NFS. The &PRODUCT; requires that the administrator ensure that the storage is available. + Oracle VM supports both iSCSI and NFS storage. When iSCSI is used with OVM, the &PRODUCT; administrator is responsible for setting up iSCSI on the host, including re-mounting the storage after the host recovers from a failure such as a network outage. With other hypervisors, &PRODUCT; takes care of mounting the iSCSI target on the host whenever it discovers a connection with an iSCSI server and unmounting the target when it discovers the connection is down. + With NFS storage, &PRODUCT; manages the overprovisioning. In this case the global configuration parameter storage.overprovisioning.factor controls the degree of overprovisioning. This is independent of hypervisor type. Local storage is an option for primary storage for vSphere, XenServer, Oracle VM, and KVM. When the local disk option is enabled, a local disk storage pool is automatically created on each host. To use local storage for the System Virtual Machines (such as the Virtual Router), set system.vm.use.local.storage to true in global configuration. 
- CloudPlatform supports multiple primary storage pools in a Cluster. For example, you could provision 2 NFS servers in primary storage. Or you could provision 1 iSCSI LUN initially and then add a second iSCSI LUN when the first approaches capacity. + &PRODUCT; supports multiple primary storage pools in a Cluster. For example, you could provision 2 NFS servers in primary storage. Or you could provision 1 iSCSI LUN initially and then add a second iSCSI LUN when the first approaches capacity.
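Global parameters such as storage.overprovisioning.factor and system.vm.use.local.storage, mentioned above, can be set from the global configuration screen or, equivalently, through the API; a hedged sketch using the updateConfiguration command against the unauthenticated port-8096 endpoint described elsewhere in this guide (the values shown are examples only):

    http://<management-server>:8096/client/api?command=updateConfiguration&name=storage.overprovisioning.factor&value=2
    http://<management-server>:8096/client/api?command=updateConfiguration&name=system.vm.use.local.storage&value=true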
diff --git a/docs/en-US/import-ami.xml b/docs/en-US/import-ami.xml index ad3a74ed7ce..a79eb83bba2 100644 --- a/docs/en-US/import-ami.xml +++ b/docs/en-US/import-ami.xml @@ -1,11 +1,30 @@ - %BOOK_ENTITIES; ]> + + +
Importing Amazon Machine Images - The following procedures describe how to import an Amazon Machine Image (AMI) into CloudPlatform when using the XenServer hypervisor. + The following procedures describe how to import an Amazon Machine Image (AMI) into &PRODUCT; when using the XenServer hypervisor. Assume you have an AMI file and this file is called CentOS_6.2_x64. Assume further that you are working on a CentOS host. If the AMI is a Fedora image, you need to be working on a Fedora host initially. You need to have a XenServer host with a file-based storage repository (either a local ext3 SR or an NFS SR) to convert to a VHD once the image file has been customized on the Centos/Fedora host. When copying and pasting a command, be sure the command has pasted as a single line before executing. Some document viewers may introduce unwanted line breaks in copied text. @@ -16,7 +35,7 @@ Install the kernel-xen package into the image. This downloads the PV kernel and ramdisk to the image.# yum -c /mnt/loop/centos54/etc/yum.conf --installroot=/mnt/loop/centos62/ -y install kernel-xen Create a grub entry in /boot/grub/grub.conf.# mkdir -p /mnt/loop/centos62/boot/grub # touch /mnt/loop/centos62/boot/grub/grub.conf -# echo "" > /mnt/loop/centos62/boot/grub/grub.conf +# echo "" > /mnt/loop/centos62/boot/grub/grub.conf Determine the name of the PV kernel that has been installed into the image. # cd /mnt/loop/centos62 @@ -69,7 +88,7 @@ passwd: all authentication tokens updated successfully. PermitRootLogin yes PasswordAuthentication yes - If you need the template to be enabled to reset passwords from the CloudPlatform UI or API, + If you need the template to be enabled to reset passwords from the &PRODUCT; UI or API, install the password change script into the image at this point. See . Unmount and delete loopback mount.# umount /mnt/loop/centos54 diff --git a/docs/en-US/increase-management-server-max-memory.xml b/docs/en-US/increase-management-server-max-memory.xml index d4ac19663e1..16d18e75830 100644 --- a/docs/en-US/increase-management-server-max-memory.xml +++ b/docs/en-US/increase-management-server-max-memory.xml @@ -1,8 +1,27 @@ - %BOOK_ENTITIES; ]> + + +
Increase Management Server Maximum Memory If the Management Server is subject to high demand, the default maximum JVM memory allocation can be insufficient. To increase the memory: diff --git a/docs/en-US/incremental-snapshots-backup.xml b/docs/en-US/incremental-snapshots-backup.xml index db6de2fc51f..6c7d820dff1 100644 --- a/docs/en-US/incremental-snapshots-backup.xml +++ b/docs/en-US/incremental-snapshots-backup.xml @@ -1,12 +1,31 @@ - %BOOK_ENTITIES; ]> + + +
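The memory-increase steps themselves are not shown in this excerpt. On Management Servers of this era the limit was typically raised by enlarging the -Xmx value in the Tomcat JAVA_OPTS line and restarting the service; a hedged illustration only, with the file path and value as assumptions rather than text taken from this guide:

    # Management Server Tomcat configuration (path is an assumption, e.g. /etc/cloud/management/tomcat6.conf)
    JAVA_OPTS="... -Xmx2g ..."
    # then restart the Management Server
    service cloud-management restart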
Incremental Snapshots and Backup Snapshots are created on primary storage where a disk resides. After a snapshot is created, it is immediately backed up to secondary storage and removed from primary storage for optimal utilization of space on primary storage. - CloudPlatform does incremental backups for some hypervisors. When incremental backups are supported, every N backup is a full backup. + &PRODUCT; does incremental backups for some hypervisors. When incremental backups are supported, every N backup is a full backup. diff --git a/docs/en-US/initial-setup-of-external-firewalls-loadbalancers.xml b/docs/en-US/initial-setup-of-external-firewalls-loadbalancers.xml index 8cfdb6a79d4..a2d8db24400 100644 --- a/docs/en-US/initial-setup-of-external-firewalls-loadbalancers.xml +++ b/docs/en-US/initial-setup-of-external-firewalls-loadbalancers.xml @@ -1,11 +1,30 @@ - %BOOK_ENTITIES; ]> + + +
Initial Setup of External Firewalls and Load Balancers - When the first VM is created for a new account, CloudPlatform programs the external firewall and load balancer to work with the VM. The following objects are created on the firewall: + When the first VM is created for a new account, &PRODUCT; programs the external firewall and load balancer to work with the VM. The following objects are created on the firewall: A new logical interface to connect to the account's private VLAN. The interface IP is always the first IP of the account's private subnet (e.g. 10.1.1.1). A source NAT rule that forwards all outgoing traffic from the account's private VLAN to the public Internet, using the account's public IP address as the source address diff --git a/docs/en-US/initialize-and-test.xml b/docs/en-US/initialize-and-test.xml index ed251a7cee4..1c66c40a8ab 100644 --- a/docs/en-US/initialize-and-test.xml +++ b/docs/en-US/initialize-and-test.xml @@ -1,9 +1,28 @@ - %BOOK_ENTITIES; ]> + + +
Initialize and Test TODO -
\ No newline at end of file +
diff --git a/docs/en-US/install-database-on-management-server-node.xml b/docs/en-US/install-database-on-management-server-node.xml index e80f09c2796..2369e7cdb16 100644 --- a/docs/en-US/install-database-on-management-server-node.xml +++ b/docs/en-US/install-database-on-management-server-node.xml @@ -1,14 +1,30 @@ - %BOOK_ENTITIES; ]> + + +
Install the Database on the Management Server Node - This section describes how to install MySQL on the same machine with the Management Server. - This technique is intended for a simple deployment that has a single Management Server node. - If you have a multi-node Management Server deployment, you will typically use a separate node for MySQL. - See . + This section describes how to install MySQL on the same machine with the Management Server. This technique is intended for a simple deployment that has a single Management Server node. If you have a multi-node Management Server deployment, you will typically use a separate node for MySQL. See . If you already have a version of MySQL installed on the Management Server node, make one of the following choices, depending on what version of MySQL it is. The most recent version tested is 5.1.58. diff --git a/docs/en-US/install-database-on-separate-node.xml b/docs/en-US/install-database-on-separate-node.xml index 4dc10bab481..362269c56b4 100644 --- a/docs/en-US/install-database-on-separate-node.xml +++ b/docs/en-US/install-database-on-separate-node.xml @@ -1,8 +1,27 @@ - %BOOK_ENTITIES; ]> + + +
Install the Database on a Separate Node This section describes how to install MySQL on a standalone machine, separate from the Management Server. @@ -83,4 +102,4 @@ mysql> exit You should see the message “Management Server setup is done.” -
\ No newline at end of file +
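For reference, the database setup step that produces the "Management Server setup is done." message referenced above was typically run with the cloud-setup-databases script; a hedged sketch, with the script name, account, and password placeholders assumed from &PRODUCT; releases of this period rather than quoted from this guide:

    # run on the Management Server, pointing at the MySQL node (use localhost for the single-node case)
    cloud-setup-databases cloud:<dbpassword>@<database-server-hostname> --deploy-as=root:<root-password>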
diff --git a/docs/en-US/install-management-server-multi-nodes.xml b/docs/en-US/install-management-server-multi-nodes.xml index 3efa4f6e24b..5b8326538a8 100644 --- a/docs/en-US/install-management-server-multi-nodes.xml +++ b/docs/en-US/install-management-server-multi-nodes.xml @@ -1,8 +1,27 @@ - %BOOK_ENTITIES; ]> + + +
Prepare and Start Additional Management Servers For your second and subsequent Management Servers, you will install the Management Server software, connect it to the database, and set up the OS for the Management Server. @@ -11,8 +30,9 @@ Perform the steps in . Download the Management Server onto the additional host where it will run. Get the software from the following link. - https://www.citrix.com/English/ss/downloads/ - You will need a MyCitrix account. + https://www.citrix.com/English/ss/downloads/ +FIXME + You will need a MyCitrix account. Install the packages. You should have a file in the form of “&PRODUCT;-VERSION-N-OSVERSION.tar.gz”. Untar the file and then run the install.sh script inside it. Replace the file and directory names below with those you are using: @@ -51,4 +71,4 @@ Repeat these steps on each additional Management Server. Be sure to configure a load balancer for the Management Servers. See Management Server Load Balancing. -
\ No newline at end of file +
diff --git a/docs/en-US/install-management-server.xml b/docs/en-US/install-management-server.xml index bfe12838673..c74c866b557 100644 --- a/docs/en-US/install-management-server.xml +++ b/docs/en-US/install-management-server.xml @@ -1,20 +1,37 @@ - %BOOK_ENTITIES; ]> + + +
Install the Management Server on the First Host - The first step in installation, whether you are installing the Management Server on one host or many, is to - install the software on a single node. + The first step in installation, whether you are installing the Management Server on one host or many, is to install the software on a single node. - If you are planning to install the Management Server on multiple nodes - for high availability, do not proceed to the additional nodes yet. That step will come later. + If you are planning to install the Management Server on multiple nodes for high availability, do not proceed to the additional nodes yet. That step will come later. - Download the Management Server onto the host where it will run. - Get the software from the following link. - https://www.citrix.com/English/ss/downloads/ - You will need a MyCitrix account. + Download the Management Server onto the host where it will run. Get the software from the following link. + https://www.citrix.com/English/ss/downloads/ + FIXME + You will need a MyCitrix account. Install the packages. You should have a file in the form of “&PRODUCT;-VERSION-N-OSVERSION.tar.gz”. Untar the file and then run the install.sh script inside it. Replace the file and directory names below with those you are using: diff --git a/docs/en-US/installation-complete.xml b/docs/en-US/installation-complete.xml index 62a069aa825..197c3cf6c38 100644 --- a/docs/en-US/installation-complete.xml +++ b/docs/en-US/installation-complete.xml @@ -1,8 +1,27 @@ - %BOOK_ENTITIES; ]> + + +
Installation Complete! Next Steps Congratulations! You have now installed &PRODUCT; Management Server and the database it uses to persist system data. diff --git a/docs/en-US/installation-steps-overview.xml b/docs/en-US/installation-steps-overview.xml index 9a95bfb06f0..493ea247a50 100644 --- a/docs/en-US/installation-steps-overview.xml +++ b/docs/en-US/installation-steps-overview.xml @@ -1,13 +1,30 @@ - %BOOK_ENTITIES; ]> + + +
Overview of Installation Steps - - For anything more than a simple trial installation, you will need guidance for a variety of configuration choices. It is strongly recommended that you read the following: - + For anything more than a simple trial installation, you will need guidance for a variety of configuration choices. It is strongly recommended that you read the following: Choosing a Deployment Architecture Choosing a Hypervisor: Supported Features @@ -15,50 +32,51 @@ Storage Setup Best Practices + + Prepare - - 1. Make sure you have the required hardware ready - - - 2. (Optional) Fill out the preparation checklists - - - Install the &PRODUCT; software - - - 3. Install the Management Server (choose single-node or multi-node) - - - 4. Log in to the UI - - - Provision your cloud infrastructure - - - 5. Add a zone. Includes the first pod, cluster, and host - - - 6. Add more pods - - - 7. Add more clusters - - - 8. Add more hosts - - - 9. Add more primary storage - - - 10. Add more secondary storage - - - Try using the cloud - - - 11. Initialization and testing - - -
\ No newline at end of file + Make sure you have the required hardware ready + + + (Optional) Fill out the preparation checklists + + + Install the &PRODUCT; software + + + Install the Management Server (choose single-node or multi-node) + + + Log in to the UI + + + Provision your cloud infrastructure + + + Add a zone. Includes the first pod, cluster, and host + + + Add more pods + + + Add more clusters + + + Add more hosts + + + Add more primary storage + + + Add more secondary storage + + + Try using the cloud + + + Initialization and testing + + +
diff --git a/docs/en-US/installation.xml b/docs/en-US/installation.xml index 6a003fe4af3..3b827278f3f 100644 --- a/docs/en-US/installation.xml +++ b/docs/en-US/installation.xml @@ -1,8 +1,27 @@ - %BOOK_ENTITIES; ]> + + + Installation diff --git a/docs/en-US/installation_overview.xml b/docs/en-US/installation_overview.xml index 6b743d219c0..84a45037272 100644 --- a/docs/en-US/installation_overview.xml +++ b/docs/en-US/installation_overview.xml @@ -1,8 +1,27 @@ - %BOOK_ENTITIES; ]> + + + Installation Overview diff --git a/docs/en-US/installation_steps_overview.xml b/docs/en-US/installation_steps_overview.xml index a48cf512be7..2632a4d6243 100644 --- a/docs/en-US/installation_steps_overview.xml +++ b/docs/en-US/installation_steps_overview.xml @@ -1,60 +1,84 @@ -
- Overview of Installation Steps - - For anything more than a simple trial installation, you will need guidance for a variety of configuration choices. It is strongly recommended that you read the following: - - - Choosing a Deployment Architecture - Choosing a Hypervisor: Supported Features - Network Setup - Storage Setup - Best Practices - - - Prepare - - - 1. Make sure you have the required hardware ready - - - 2. (Optional) Fill out the preparation checklists - - - Install the CloudStack software - - - 3. Install the CloudStack Management Server (single-node, or multi-node) - - - 4. Log in to the CloudStack UI - - - Provision your cloud infrastructure - - - 5. Add a zone. Includes the first pod, cluster, and host - - - 6. Add more pods - - - 7. Add more clusters - - - 8. Add more hosts - - - 9. Add more primary storage - - - 10. Add more secondary storage - - - Try using the cloud - - - 11. Initialization and testing - - -
+ + +%BOOK_ENTITIES; +]> + + +
+ Overview of Installation Steps + For anything more than a simple trial installation, you will need + guidance for a variety of configuration choices. It is strongly + recommended that you read the following: + + Choosing a Deployment Architecture + Choosing a Hypervisor: Supported Features + Network Setup + Storage Setup + Best Practices + + + + + Prepare + + Make sure you have the required hardware ready + + + (Optional) Fill out the preparation checklists + + + Install the &PRODUCT; software + + + Install the Management Server (choose single-node or multi-node) + + + Log in to the UI + + + Provision your cloud infrastructure + + + Add a zone. Includes the first pod, cluster, and host + + + Add more pods + + + Add more clusters + + + Add more hosts + + + Add more primary storage + + + Add more secondary storage + + + Try using the cloud + + + Initialization and testing + + +
diff --git a/docs/en-US/introduction.xml b/docs/en-US/introduction.xml index 98653603a38..3a30896dd04 100644 --- a/docs/en-US/introduction.xml +++ b/docs/en-US/introduction.xml @@ -1,8 +1,27 @@ - %BOOK_ENTITIES; ]> + + +
Introduction diff --git a/docs/en-US/ip-forwarding-firewalling.xml b/docs/en-US/ip-forwarding-firewalling.xml index 6873fcb72ed..61aa6ad7e71 100644 --- a/docs/en-US/ip-forwarding-firewalling.xml +++ b/docs/en-US/ip-forwarding-firewalling.xml @@ -1,8 +1,27 @@ - %BOOK_ENTITIES; ]> + + +
IP Forwarding and Firewalling By default, all incoming traffic to the public IP address is rejected. All outgoing traffic from the guests is translated via NAT to the public IP address and is allowed. diff --git a/docs/en-US/ip-load-balancing.xml b/docs/en-US/ip-load-balancing.xml index 638e935d2f4..1ec4663cd19 100644 --- a/docs/en-US/ip-load-balancing.xml +++ b/docs/en-US/ip-load-balancing.xml @@ -1,11 +1,30 @@ - %BOOK_ENTITIES; ]> + + +
IP Load Balancing - The user may choose to associate the same public IP for multiple guests. CloudPlatform implements a TCP-level load balancer with the following policies. + The user may choose to associate the same public IP for multiple guests. &PRODUCT; implements a TCP-level load balancer with the following policies. Round-robin Least connection diff --git a/docs/en-US/ipaddress-usage-record-format.xml b/docs/en-US/ipaddress-usage-record-format.xml index 609fbd6f5ba..e0551fc4f2a 100644 --- a/docs/en-US/ipaddress-usage-record-format.xml +++ b/docs/en-US/ipaddress-usage-record-format.xml @@ -1,19 +1,42 @@ + + +%BOOK_ENTITIES; +]> + + +
IP Address Usage Record Format For IP address usage the following fields exist in a usage record. - account – name of the account - accountid – ID of the account - domainid – ID of the domain in which this account resides - zoneid – Zone where the usage occurred - description – A string describing what the usage record is tracking - usage – String representation of the usage, including the units of usage - usagetype – A number representing the usage type (see Usage Types) - rawusage – A number representing the actual usage in hours - usageid – IP address ID - startdate, enddate – The range of time for which the usage is aggregated; see Dates in the Usage Record - issourcenat – Whether source NAT is enabled for the IP address + account - name of the account + accountid - ID of the account + domainid - ID of the domain in which this account resides + zoneid - Zone where the usage occurred + description - A string describing what the usage record is tracking + usage - String representation of the usage, including the units of usage + usagetype - A number representing the usage type (see Usage Types) + rawusage - A number representing the actual usage in hours + usageid - IP address ID + startdate, enddate - The range of time for which the usage is aggregated; see Dates in the Usage Record + issourcenat - Whether source NAT is enabled for the IP address iselastic - True if the IP address is elastic. -
- +
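For illustration only, a single IP address usage record with the fields listed above could be represented as the following Python dictionary. This is a hypothetical sketch: the field names come from the list above, but every value shown (account name, IDs, dates) is invented for the example, and the usage type number is only what such a record would typically carry.

# Hypothetical example of one IP address usage record, shown as a Python dict.
# Field names follow the list above; all values are illustrative only.
ip_usage_record = {
    "account": "acme-dev",                # name of the account
    "accountid": "42",                    # ID of the account
    "domainid": "1",                      # ID of the domain for this account
    "zoneid": "1",                        # zone where the usage occurred
    "description": "IPAddress usage for 192.0.2.10",
    "usage": "24 Hrs",                    # human-readable usage string with units
    "usagetype": 3,                       # numeric usage type (see Usage Types)
    "rawusage": 24.0,                     # actual usage in hours
    "usageid": "7",                       # IP address ID
    "startdate": "2012-08-01T00:00:00",   # aggregation window start
    "enddate": "2012-08-01T23:59:59",     # aggregation window end
    "issourcenat": False,                 # whether source NAT is enabled
    "iselastic": False,                   # True if the IP address is elastic
}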
diff --git a/docs/en-US/isolated-networks.xml b/docs/en-US/isolated-networks.xml index a2e27ab7307..13f8aa1d4ca 100644 --- a/docs/en-US/isolated-networks.xml +++ b/docs/en-US/isolated-networks.xml @@ -1,8 +1,27 @@ - %BOOK_ENTITIES; ]> + + +
Isolated Networks An isolated network can be accessed only by virtual machines of a single account. Isolated networks have the following properties. @@ -11,6 +30,4 @@ There is one network offering for the entire network The network offering can be upgraded or downgraded but it is for the entire network - -
diff --git a/docs/en-US/job-status.xml b/docs/en-US/job-status.xml index 17d602e21db..aebf8af0931 100644 --- a/docs/en-US/job-status.xml +++ b/docs/en-US/job-status.xml @@ -1,3 +1,27 @@ + + +%BOOK_ENTITIES; +]> + + +
Job Status The key to using an asynchronous command is the job ID that is returned immediately once the command has been executed. With the job ID, you can periodically check the job status by making calls to queryAsyncJobResult command. The command will return three possible job status integer values: @@ -6,5 +30,5 @@ 1 - Job has successfully completed. The job will return any successful response values associated with command that was originally executed. 2 - Job has failed to complete. Please check the "jobresultcode" tag for failure reason code and "jobresult" for the failure reason. -
+
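As a rough illustration of how a client might consume these status codes, the following Python sketch polls queryAsyncJobResult until the job leaves status 0. It assumes a caller-supplied helper named api_get() that already handles the API URL, API key, and request signing (see Making API Requests); that helper name, the polling interval, and the timeout are assumptions made for this example, not part of the product.

import time

def wait_for_job(api_get, jobid, interval=5, timeout=600):
    # api_get is an assumed helper that sends a signed request and returns
    # the parsed JSON response as a dict (hypothetical, not a product API).
    deadline = time.time() + timeout
    while time.time() < deadline:
        resp = api_get("queryAsyncJobResult", jobid=jobid)
        result = resp["queryasyncjobresultresponse"]
        status = result["jobstatus"]
        if status == 0:
            # 0 - job is still in progress; wait and poll again
            time.sleep(interval)
        elif status == 1:
            # 1 - job completed successfully; return its response values
            return result.get("jobresult")
        else:
            # 2 - job failed; jobresultcode and jobresult describe the failure
            raise RuntimeError("job %s failed: %s" % (jobid, result.get("jobresult")))
    raise RuntimeError("job %s did not finish within %s seconds" % (jobid, timeout))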
diff --git a/docs/en-US/linux-installation.xml b/docs/en-US/linux-installation.xml index 5168d66483a..df7a8b70e3b 100644 --- a/docs/en-US/linux-installation.xml +++ b/docs/en-US/linux-installation.xml @@ -1,35 +1,47 @@ - %BOOK_ENTITIES; ]> + + +
Linux OS Installation Use the following steps to begin the Linux OS installation: Download the script file cloud-set-guest-password: - Linux: - - Windows: - + Linux: + Windows: - Copy this file to /etc/init.d.On some Linux distributions, copy the file to /etc/rc.d/init.d. + Copy this file to /etc/init.d. + On some Linux distributions, copy the file to /etc/rc.d/init.d. Run the following command to make the script executable:chmod +x /etc/init.d/cloud-set-guest-password Depending on the Linux distribution, continue with the appropriate step.On Fedora, CentOS/RHEL, and Debian, run:chkconfig --add cloud-set-guest-password - On Ubuntu with VMware tools, link the script file to the /etc/network/if-up and - /etc/network/if-down folders, and run the script: + On Ubuntu with VMware tools, link the script file to the /etc/network/if-up and /etc/network/if-down folders, and run the script: #ln -s /etc/init.d/cloud-set-guest-password /etc/network/if-up/cloud-set-guest-password #ln -s /etc/init.d/cloud-set-guest-password /etc/network/if-down/cloud-set-guest-password If you are using Ubuntu 11.04, start by creating a directory called /var/lib/dhcp3 on your Ubuntu machine (works around a known issue with this version of Ubuntu). On all Ubuntu versions: Run “sudo update-rc.d cloud-set-guest-password defaults 98”. To test, run "mkpasswd" and check that it is generating a new password. If the “mkpasswd” command does not exist, run "sudo apt-get install whois" (or sudo apt-get install mkpasswd, depending on your Ubuntu version) and repeat. -
diff --git a/docs/en-US/load-balancer-rules.xml b/docs/en-US/load-balancer-rules.xml index 6eb323a2442..8dd7d3b47ba 100644 --- a/docs/en-US/load-balancer-rules.xml +++ b/docs/en-US/load-balancer-rules.xml @@ -1,10 +1,29 @@ - %BOOK_ENTITIES; ]> + + +
Load Balancer Rules - A CloudPlatform user or administrator may create load balancing rules that balance traffic received at a public IP to one or more VMs. A user creates a rule, specifies an algorithm, and assigns the rule to a set of VMs. - If you create load balancing rules while using a network service offering that includes an external load balancer device such as NetScaler, and later change the network service offering to one that uses the CloudPlatform virtual router, you must create a firewall rule on the virtual router for each of your existing load balancing rules so that they continue to function. -
+ A &PRODUCT; user or administrator may create load balancing rules that balance traffic received at a public IP to one or more VMs. A user creates a rule, specifies an algorithm, and assigns the rule to a set of VMs. + If you create load balancing rules while using a network service offering that includes an external load balancer device such as NetScaler, and later change the network service offering to one that uses the &PRODUCT; virtual router, you must create a firewall rule on the virtual router for each of your existing load balancing rules so that they continue to function. +
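The workflow described above (create a rule, pick an algorithm, assign it to a set of VMs) can also be driven through the API. The sketch below calls the createLoadBalancerRule and assignToLoadBalancerRule commands through the same assumed api_get() signed-request helper used in the earlier examples; the IP ID, VM IDs, ports, and rule name are placeholders, and this is an illustration rather than a definitive implementation.

# Hypothetical sketch: create a round-robin load balancing rule on a public IP
# and assign VMs to it. api_get() is an assumed helper that sends a signed API
# request and returns the parsed response; all IDs below are placeholders.
def create_web_lb_rule(api_get, public_ip_id, vm_ids):
    rule = api_get(
        "createLoadBalancerRule",
        publicipid=public_ip_id,
        algorithm="roundrobin",      # e.g. leastconn for the least-connection policy
        name="web-lb",
        privateport=80,
        publicport=80,
    )
    rule_id = rule["createloadbalancerruleresponse"]["id"]
    # assignToLoadBalancerRule is asynchronous, so the response carries a job ID
    job = api_get(
        "assignToLoadBalancerRule",
        id=rule_id,
        virtualmachineids=",".join(vm_ids),
    )
    return rule_id, job["assigntoloadbalancerruleresponse"]["jobid"]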
diff --git a/docs/en-US/loadbalancer-policy-port-forwarding-rule-usage-record-format.xml b/docs/en-US/loadbalancer-policy-port-forwarding-rule-usage-record-format.xml index 1e724fd3e1b..86f54117528 100644 --- a/docs/en-US/loadbalancer-policy-port-forwarding-rule-usage-record-format.xml +++ b/docs/en-US/loadbalancer-policy-port-forwarding-rule-usage-record-format.xml @@ -1,17 +1,40 @@ + + +%BOOK_ENTITIES; +]> + + +
Load Balancer Policy or Port Forwarding Rule Usage Record Format - account – name of the account - accountid – ID of the account - domainid – ID of the domain in which this account resides - zoneid – Zone where the usage occurred - description – A string describing what the usage record is tracking - usage – String representation of the usage, including the units of usage (e.g. 'Hrs' for hours) - usagetype – A number representing the usage type (see Usage Types) - rawusage – A number representing the actual usage in hours - usageid – ID of the load balancer policy or port forwarding rule - usagetype – A number representing the usage type (see Usage Types) - startdate, enddate – The range of time for which the usage is aggregated; see Dates in the Usage Record + account - name of the account + accountid - ID of the account + domainid - ID of the domain in which this account resides + zoneid - Zone where the usage occurred + description - A string describing what the usage record is tracking + usage - String representation of the usage, including the units of usage (e.g. 'Hrs' for hours) + usagetype - A number representing the usage type (see Usage Types) + rawusage - A number representing the actual usage in hours + usageid - ID of the load balancer policy or port forwarding rule + usagetype - A number representing the usage type (see Usage Types) + startdate, enddate - The range of time for which the usage is aggregated; see Dates in the Usage Record -
- +
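To show how these usage-record fields might be consumed, the following sketch lists records for one aggregation window and tallies raw hours per usage type via the listUsageRecords command. As in the earlier sketches, api_get() is an assumed signed-request helper, the response wrapper key follows the usual lowercase naming convention, and the dates in the usage comment are placeholders.

from collections import defaultdict

def hours_by_usage_type(api_get, startdate, enddate):
    # Sum rawusage (hours) per usagetype over one aggregation window.
    # api_get() is the same assumed signed-request helper as in the earlier sketches.
    resp = api_get("listUsageRecords", startdate=startdate, enddate=enddate)
    records = resp.get("listusagerecordsresponse", {}).get("usagerecord", [])
    totals = defaultdict(float)
    for record in records:
        totals[record["usagetype"]] += float(record.get("rawusage", 0))
    return dict(totals)

# e.g. hours_by_usage_type(api_get, "2012-08-01", "2012-08-31")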
diff --git a/docs/en-US/log-in-root-admin.xml b/docs/en-US/log-in-root-admin.xml index f57e2642a2a..c0a340a4ea9 100644 --- a/docs/en-US/log-in-root-admin.xml +++ b/docs/en-US/log-in-root-admin.xml @@ -1,8 +1,27 @@ - %BOOK_ENTITIES; ]> + + +
Logging In as the Root Administrator After the Management Server software is installed and running, you can run the &PRODUCT; user interface. This UI is there to help you provision, view, and manage your cloud infrastructure. diff --git a/docs/en-US/log-in.xml b/docs/en-US/log-in.xml index 385deddb89a..4425aa43b11 100644 --- a/docs/en-US/log-in.xml +++ b/docs/en-US/log-in.xml @@ -1,8 +1,27 @@ - %BOOK_ENTITIES; ]> + + +
Log In to the UI &PRODUCT; provides a web-based UI that can be used by both administrators and end users. The appropriate version of the UI is displayed depending on the credentials used to log in. The UI is available in popular browsers including IE7, IE8, IE9, Firefox 3.5+, Firefox 4, Safari 4, and Safari 5. The URL is: (substitute your own management server IP address) diff --git a/docs/en-US/long-running-job-events.xml b/docs/en-US/long-running-job-events.xml index 577e1c29628..345a0d0dddb 100644 --- a/docs/en-US/long-running-job-events.xml +++ b/docs/en-US/long-running-job-events.xml @@ -1,8 +1,27 @@ - %BOOK_ENTITIES; ]> + + +
Long Running Job Events The events log records three types of standard events. diff --git a/docs/en-US/maintain-hypervisors-on-hosts.xml b/docs/en-US/maintain-hypervisors-on-hosts.xml index 7d6bb0e334d..213f078ea2b 100644 --- a/docs/en-US/maintain-hypervisors-on-hosts.xml +++ b/docs/en-US/maintain-hypervisors-on-hosts.xml @@ -1,11 +1,30 @@ - %BOOK_ENTITIES; ]> + + +
Maintaining Hypervisors on Hosts - When running hypervisor software on hosts, be sure all the hotfixes provided by the hypervisor vendor are applied. Track the release of hypervisor patches through your hypervisor vendor’s support channel, and apply patches as soon as possible after they are released. CloudPlatform will not track or notify you of required hypervisor patches. It is essential that your hosts are completely up to date with the provided hypervisor patches. The hypervisor vendor is likely to refuse to support any system that is not up to date with patches. + When running hypervisor software on hosts, be sure all the hotfixes provided by the hypervisor vendor are applied. Track the release of hypervisor patches through your hypervisor vendor’s support channel, and apply patches as soon as possible after they are released. &PRODUCT; will not track or notify you of required hypervisor patches. It is essential that your hosts are completely up to date with the provided hypervisor patches. The hypervisor vendor is likely to refuse to support any system that is not up to date with patches. The lack of up-to-date hotfixes can lead to data corruption and lost VMs. - (XenServer) For more information, see Highly Recommended Hotfixes for XenServer in the CloudPlatform Knowledge Base -
\ No newline at end of file + (XenServer) For more information, see Highly Recommended Hotfixes for XenServer in the &PRODUCT; Knowledge Base +
diff --git a/docs/en-US/maintenance-mode-for-primary-storage.xml b/docs/en-US/maintenance-mode-for-primary-storage.xml index 83cc245e886..657bc8b7e6e 100644 --- a/docs/en-US/maintenance-mode-for-primary-storage.xml +++ b/docs/en-US/maintenance-mode-for-primary-storage.xml @@ -1,9 +1,28 @@ - %BOOK_ENTITIES; ]> + + +
Maintenance Mode for Primary Storage - Primary storage may be placed into maintenance mode. This is useful, for example, to replace faulty RAM in a storage device. Maintenance mode for a storage device will first stop any new guests from being provisioned on the storage device. Then it will stop all guests that have any volume on that storage device. When all such guests are stopped the storage device is in maintenance mode and may be shut down. When the storage device is online again you may cancel maintenance mode for the device. The CloudPlatform will bring the device back online and attempt to start all guests that were running at the time of the entry into maintenance mode. + Primary storage may be placed into maintenance mode. This is useful, for example, to replace faulty RAM in a storage device. Maintenance mode for a storage device will first stop any new guests from being provisioned on the storage device. Then it will stop all guests that have any volume on that storage device. When all such guests are stopped the storage device is in maintenance mode and may be shut down. When the storage device is online again you may cancel maintenance mode for the device. The &PRODUCT; will bring the device back online and attempt to start all guests that were running at the time of the entry into maintenance mode.
diff --git a/docs/en-US/making-api-request.xml b/docs/en-US/making-api-request.xml index 35ba460e1f0..33c43c5c40f 100644 --- a/docs/en-US/making-api-request.xml +++ b/docs/en-US/making-api-request.xml @@ -1,15 +1,33 @@ - %BOOK_ENTITIES; ]> + + +
Making API Requests - - All CloudStack API requests are submitted in the form of a HTTP GET/POST with an associated command and any parameters. A request is composed of the following whether in HTTP or HTTPS: + All &PRODUCT; API requests are submitted in the form of a HTTP GET/POST with an associated command and any parameters. A request is composed of the following whether in HTTP or HTTPS: - CloudStack API URL: This is the web services API entry point(for example, http://www.cloud.com:8080/client/api) + &PRODUCT; API URL: This is the web services API entry point(for example, http://www.cloud.com:8080/client/api) Command: The web services command you wish to execute, such as start a virtual machine or create a disk volume Parameters: Any additional required or optional parameters for the command @@ -27,9 +45,9 @@ 7. &apiKey=miVr6X7u6bN_sdahOBpjNejPgEsT35eXqjB8CG20YI3yaxXcgpyuaIRmFI_EJTVwZ0nUkkJbPmY3y2bciKwFQ 8. &signature=Lxx1DM40AjcXU%2FcaiK8RAP0O1hU%3D - The first line is the CloudStack API URL. This is the Cloud instance you wish to interact with. - The second line refers to the command you wish to execute. In our example, we are attempting to deploy a fresh new virtual machine. It is preceded by a (?) to separate itself from the CloudStack API URL. - Lines 3-6 are the parameters for this given command. To see the command and its request parameters, please refer to the appropriate section in the CloudStack API documentation. Each parameter field-value pair (field=value) is preceded by an ampersand character (&). + The first line is the &PRODUCT; API URL. This is the Cloud instance you wish to interact with. + The second line refers to the command you wish to execute. In our example, we are attempting to deploy a fresh new virtual machine. It is preceded by a (?) to separate itself from the &PRODUCT; API URL. + Lines 3-6 are the parameters for this given command. To see the command and its request parameters, please refer to the appropriate section in the &PRODUCT; API documentation. Each parameter field-value pair (field=value) is preceded by an ampersand character (&). Line 7 is the user API Key that uniquely identifies the account. See Signing API Requests on page 7. Line 8 is the signature hash created to authenticate the user account executing the API command. See Signing API Requests on page 7.
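A request like the example above can also be assembled programmatically. The following Python sketch builds a signed URL under stated assumptions: the endpoint, API key, and secret key are placeholders supplied by the caller, the parameter values in the commented usage line are illustrative only, and the signing steps follow the procedure referenced above as Signing API Requests (lowercase and sort the field=value pairs, HMAC-SHA1 them with the secret key, Base64-encode the digest). It is a sketch, not a definitive client implementation.

import base64
import hashlib
import hmac
import urllib.parse
import urllib.request

def signed_api_url(endpoint, api_key, secret_key, command, **params):
    # endpoint, api_key, and secret_key are placeholders supplied by the caller.
    params.update({"command": command, "apiKey": api_key, "response": "json"})
    # Lowercase and sort the URL-encoded field=value pairs before signing.
    to_sign = "&".join(
        "%s=%s" % (key.lower(), urllib.parse.quote(str(value), safe="*").lower())
        for key, value in sorted(params.items(), key=lambda item: item[0].lower())
    )
    digest = hmac.new(secret_key.encode(), to_sign.encode(), hashlib.sha1).digest()
    params["signature"] = base64.b64encode(digest).decode()
    return endpoint + "?" + urllib.parse.urlencode(params)

# Placeholder usage: deploy a VM, then poll the returned job ID (see Job Status).
# url = signed_api_url("http://www.cloud.com:8080/client/api", API_KEY, SECRET_KEY,
#                      "deployVirtualMachine", serviceofferingid=1, diskofferingid=1,
#                      templateid=2, zoneid=4)
# print(urllib.request.urlopen(url).read())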
diff --git a/docs/en-US/manage-cloud.xml b/docs/en-US/manage-cloud.xml index 564e62144e2..c4c4d6be248 100644 --- a/docs/en-US/manage-cloud.xml +++ b/docs/en-US/manage-cloud.xml @@ -1,8 +1,27 @@ - %BOOK_ENTITIES; ]> + + +
Managing the Cloud vCenter Maintenance Mode diff --git a/docs/en-US/management-server-install-flow.xml b/docs/en-US/management-server-install-flow.xml index a828d673fa8..dc6c651bc00 100644 --- a/docs/en-US/management-server-install-flow.xml +++ b/docs/en-US/management-server-install-flow.xml @@ -1,8 +1,27 @@ - %BOOK_ENTITIES; ]> + + +
Management Server Installation diff --git a/docs/en-US/management-server-installation-overview.xml b/docs/en-US/management-server-installation-overview.xml index f733141df65..7c000c5363a 100644 --- a/docs/en-US/management-server-installation-overview.xml +++ b/docs/en-US/management-server-installation-overview.xml @@ -1,8 +1,30 @@ -
+ + +%BOOK_ENTITIES; +]> + + + +
Management Server Installation Overview - - This section describes installing the Management Server. There are two slightly different installation flows, - depending on how many Management Server nodes will be in your cloud: + This section describes installing the Management Server. There are two slightly different installation flows, depending on how many Management Server nodes will be in your cloud: A single Management Server node, with MySQL on the same node. Multiple Management Server nodes, with MySQL on a node separate from the Management Servers. @@ -21,4 +43,3 @@ Prepare the System VM Template
- diff --git a/docs/en-US/management-server-overview.xml b/docs/en-US/management-server-overview.xml index 40cbd53fe53..12a5121e7e5 100644 --- a/docs/en-US/management-server-overview.xml +++ b/docs/en-US/management-server-overview.xml @@ -1,57 +1,58 @@ - %BOOK_ENTITIES; ]> + + +
Management Server Overview - - The Management Server is the &PRODUCT; software that manages cloud resources. By interacting with the Management Server through its UI or API, you can configure and manage your cloud infrastructure. + The Management Server is the &PRODUCT; software that manages cloud resources. By interacting with the Management Server through its UI or API, you can configure and manage your cloud infrastructure. - - The Management Server runs on a dedicated server or VM. It controls allocation of virtual machines to hosts and assigns storage and IP addresses to the virtual machine instances. The Management Server runs in a Tomcat container and requires a MySQL database for persistence. + The Management Server runs on a dedicated server or VM. It controls allocation of virtual machines to hosts and assigns storage and IP addresses to the virtual machine instances. The Management Server runs in a Tomcat container and requires a MySQL database for persistence. - - The machine must meet the system requirements described in System Requirements. + The machine must meet the system requirements described in System Requirements. - - The Management Server: + The Management Server: - - Provides the web user interface for the administrator and a reference user interface for end users. - + Provides the web user interface for the administrator and a reference user interface for end users. - - Provides the APIs for &PRODUCT;. - + Provides the APIs for &PRODUCT;. - - Manages the assignment of guest VMs to particular hosts. - + Manages the assignment of guest VMs to particular hosts. - - Manages the assignment of public and private IP addresses to particular accounts. - + Manages the assignment of public and private IP addresses to particular accounts. - - Manages the allocation of storage to guests as virtual disks. - + Manages the allocation of storage to guests as virtual disks. - - Manages snapshots, templates, and ISO images, possibly replicating them across data centers. - + Manages snapshots, templates, and ISO images, possibly replicating them across data centers. - - Provides a single point of configuration for the cloud. - + Provides a single point of configuration for the cloud.
diff --git a/docs/en-US/management_server_multi_node_install.xml b/docs/en-US/management_server_multi_node_install.xml index ba925bddf50..5b6555a3151 100644 --- a/docs/en-US/management_server_multi_node_install.xml +++ b/docs/en-US/management_server_multi_node_install.xml @@ -1,8 +1,27 @@ - %BOOK_ENTITIES; ]> + + + Management Server Multi-Node Installation diff --git a/docs/en-US/management_server_overview.xml b/docs/en-US/management_server_overview.xml index 19b32d726bb..6b5d6fed3eb 100644 --- a/docs/en-US/management_server_overview.xml +++ b/docs/en-US/management_server_overview.xml @@ -1,52 +1,53 @@ + + +%BOOK_ENTITIES; +]> + +
Management Server Overview - - The Management Server is the CloudStack software that manages cloud resources. By interacting with the Management Server through its UI or API, you can configure and manage your cloud infrastructure. - - - The Management Server runs on a dedicated server or VM. It controls allocation of virtual machines to hosts and assigns storage and IP addresses to the virtual machine instances. The CloudStack Management Server runs in a Tomcat container and requires a MySQL database for persistence. - - - The machine must meet the system requirements described in System Requirements. - - - The Management Server: - + The Management Server is the &PRODUCT; software that manages cloud resources. By interacting with the Management Server through its UI or API, you can configure and manage your cloud infrastructure. + The Management Server runs on a dedicated server or VM. It controls allocation of virtual machines to hosts and assigns storage and IP addresses to the virtual machine instances. The &PRODUCT; Management Server runs in a Tomcat container and requires a MySQL database for persistence. + The machine must meet the system requirements described in System Requirements. + The Management Server: - - Provides the web user interface for the administrator and a reference user interface for end users. - + Provides the web user interface for the administrator and a reference user interface for end users. - - Provides the APIs for the CloudStack platform. - + Provides the APIs for the &PRODUCT; platform. - - Manages the assignment of guest VMs to particular hosts. - + Manages the assignment of guest VMs to particular hosts. - - Manages the assignment of public and private IP addresses to particular accounts. - + Manages the assignment of public and private IP addresses to particular accounts. - - Manages the allocation of storage to guests as virtual disks. - + Manages the allocation of storage to guests as virtual disks. - - Manages snapshots, templates, and ISO images, possibly replicating them across data centers. - + Manages snapshots, templates, and ISO images, possibly replicating them across data centers. - - Provides a single point of configuration for the cloud. - + Provides a single point of configuration for the cloud.
diff --git a/docs/en-US/manual-live-migration.xml b/docs/en-US/manual-live-migration.xml index 63a80e8ef0a..a82b52ed1e6 100644 --- a/docs/en-US/manual-live-migration.xml +++ b/docs/en-US/manual-live-migration.xml @@ -1,3 +1,27 @@ + + +%BOOK_ENTITIES; +]> + + +
Moving VMs Between Hosts (Manual Live Migration) The CloudPlatform administrator can move a running VM from one host to another without interrupting service to users or going into maintenance mode. This is called manual live migration, and can be done under the following conditions: diff --git a/docs/en-US/max-result-page-returned.xml b/docs/en-US/max-result-page-returned.xml index ffbe60bbd9b..3c12415bd72 100644 --- a/docs/en-US/max-result-page-returned.xml +++ b/docs/en-US/max-result-page-returned.xml @@ -1,3 +1,27 @@ + + +%BOOK_ENTITIES; +]> + + +
Maximum Result Pages Returned diff --git a/docs/en-US/migrate-datadisk-volume-new-storage-pool.xml b/docs/en-US/migrate-datadisk-volume-new-storage-pool.xml index 417eb71d46b..0d545bab0b6 100644 --- a/docs/en-US/migrate-datadisk-volume-new-storage-pool.xml +++ b/docs/en-US/migrate-datadisk-volume-new-storage-pool.xml @@ -1,14 +1,33 @@ - %BOOK_ENTITIES; ]> + + +
Migrating a Data Disk Volume to a New Storage Pool - Log in to the CloudPlatform UI as a user or admin. + Log in to the &PRODUCT; UI as a user or admin. Detach the data disk from the VM. See Detaching and Moving Volumes (but skip the “reattach” step at the end. You will do that after migrating to new storage). - Call the CloudPlatform API command migrateVolume and pass in the volume ID and the ID of any storage pool in the zone. + Call the &PRODUCT; API command migrateVolume and pass in the volume ID and the ID of any storage pool in the zone. Watch for the volume status to change to Migrating, then back to Ready. Attach the volume to any desired VM running in the same cluster as the new storage server. See Attaching a Volume diff --git a/docs/en-US/migrate-vm-rootvolume-volume-new-storage-pool.xml b/docs/en-US/migrate-vm-rootvolume-volume-new-storage-pool.xml index dbf6138356e..0ce06b0223d 100644 --- a/docs/en-US/migrate-vm-rootvolume-volume-new-storage-pool.xml +++ b/docs/en-US/migrate-vm-rootvolume-volume-new-storage-pool.xml @@ -1,16 +1,35 @@ - %BOOK_ENTITIES; ]> + + +
Migrating a VM Root Volume to a New Storage Pool When migrating the root disk volume, the VM must first be stopped, and users can not access the VM. After migration is complete, the VM can be restarted. - Log in to the CloudPlatform UI as a user or admin. + Log in to the &PRODUCT; UI as a user or admin. Detach the data disk from the VM. See Detaching and Moving Volumes (but skip the “reattach” step at the end. You will do that after migrating to new storage). Stop the VM. - Call the CloudPlatform API command migrateVirtualMachine with the ID of the VM to migrate and the IDs of a destination host and destination storage pool in the same zone. + Call the &PRODUCT; API command migrateVirtualMachine with the ID of the VM to migrate and the IDs of a destination host and destination storage pool in the same zone. Watch for the VM status to change to Migrating, then back to Stopped. Restart the VM. diff --git a/docs/en-US/minimum-system-requirements.xml b/docs/en-US/minimum-system-requirements.xml index 3973c304233..6f8b4ab9be1 100644 --- a/docs/en-US/minimum-system-requirements.xml +++ b/docs/en-US/minimum-system-requirements.xml @@ -1,17 +1,32 @@ - %BOOK_ENTITIES; ]> + + +
Minimum System Requirements
Management Server, Database, and Storage System Requirements - - The machines that will run the Management Server and MySQL database must meet the following requirements. - The same machines can also be used to provide primary and secondary storage, such as via localdisk or NFS. - The Management Server may be placed on a virtual machine. - + The machines that will run the Management Server and MySQL database must meet the following requirements. The same machines can also be used to provide primary and secondary storage, such as via localdisk or NFS. The Management Server may be placed on a virtual machine. Operating system: @@ -51,8 +66,7 @@ Oracle VM (OVM) Installation and Configuration - - Be sure you fulfill the additional hypervisor requirements and installation steps provided in this Guide. Hypervisor hosts must be properly prepared to work with CloudStack. For example, the requirements for XenServer are listed under Citrix XenServer Installation. + Be sure you fulfill the additional hypervisor requirements and installation steps provided in this Guide. Hypervisor hosts must be properly prepared to work with CloudStack. For example, the requirements for XenServer are listed under Citrix XenServer Installation.
diff --git a/docs/en-US/modify-delete-service-offerings.xml b/docs/en-US/modify-delete-service-offerings.xml index 601a8bd6211..b917af48252 100644 --- a/docs/en-US/modify-delete-service-offerings.xml +++ b/docs/en-US/modify-delete-service-offerings.xml @@ -1,8 +1,27 @@ - %BOOK_ENTITIES; ]> + + +
Modifying or Deleting a Service Offering Service offerings cannot be changed once created. This applies to both compute offerings and disk offerings. diff --git a/docs/en-US/multi_node_overview.xml b/docs/en-US/multi_node_overview.xml index b7533405804..92904cff969 100644 --- a/docs/en-US/multi_node_overview.xml +++ b/docs/en-US/multi_node_overview.xml @@ -1,3 +1,27 @@ + + +%BOOK_ENTITIES; +]> + + +
Management Server Multi-Node Installation Overview diff --git a/docs/en-US/multiple-system-vm-vmware.xml b/docs/en-US/multiple-system-vm-vmware.xml index a809b67eb50..014dfa1f329 100644 --- a/docs/en-US/multiple-system-vm-vmware.xml +++ b/docs/en-US/multiple-system-vm-vmware.xml @@ -1,9 +1,28 @@ - %BOOK_ENTITIES; ]> + + +
Multiple System VM Support for VMware - Every CloudPlatform zone has single System VM for template processing tasks such as downloading templates, uploading templates, and uploading ISOs. In a zone where VMware is being used, additional System VMs can be launched to process VMware-specific tasks such as taking snapshots and creating private templates. The CloudPlatform management server launches additional System VMs for VMware-specific tasks as the load increases. The management server monitors and weights all commands sent to these System VMs and performs dynamic load balancing and scaling-up of more System VMs. + Every &PRODUCT; zone has a single System VM for template processing tasks such as downloading templates, uploading templates, and uploading ISOs. In a zone where VMware is being used, additional System VMs can be launched to process VMware-specific tasks such as taking snapshots and creating private templates. The &PRODUCT; management server launches additional System VMs for VMware-specific tasks as the load increases. The management server monitors and weights all commands sent to these System VMs and performs dynamic load balancing and scaling-up of more System VMs.
diff --git a/docs/en-US/release-notes-3.0.4.ent b/docs/en-US/release-notes-3.0.4.ent new file mode 100644 index 00000000000..65ecebd8c7a --- /dev/null +++ b/docs/en-US/release-notes-3.0.4.ent @@ -0,0 +1,4 @@ + + + + diff --git a/docs/publican-cloudstack/defaults.cfg b/docs/publican-cloudstack/defaults.cfg new file mode 100644 index 00000000000..91d20b86f35 --- /dev/null +++ b/docs/publican-cloudstack/defaults.cfg @@ -0,0 +1,6 @@ +# Config::Simple 4.59 +# Thu Aug 11 14:07:41 2011 + +doc_url: "http://docs.cloudstack.org" +prod_url: "http://cloudstack.org" + diff --git a/docs/publican-cloudstack/overrides.cfg b/docs/publican-cloudstack/overrides.cfg new file mode 100644 index 00000000000..2954532d4c3 --- /dev/null +++ b/docs/publican-cloudstack/overrides.cfg @@ -0,0 +1,5 @@ +# Config::Simple 4.59 +# Thu Aug 11 14:07:41 2011 + +strict: 0 + diff --git a/docs/publican-cloudstack/publican.cfg b/docs/publican-cloudstack/publican.cfg new file mode 100644 index 00000000000..48bdc0b18be --- /dev/null +++ b/docs/publican-cloudstack/publican.cfg @@ -0,0 +1,9 @@ +# Config::Simple 4.59 +# Thu Aug 11 14:07:41 2011 + +version: "0.1" +xml_lang: "en-US" +release: 0 +type: brand +brand: cloudstack + diff --git a/docs/runbook/zh-CN/Author_Group.po b/docs/runbook/zh-CN/Author_Group.po new file mode 100644 index 00000000000..9643c110d67 --- /dev/null +++ b/docs/runbook/zh-CN/Author_Group.po @@ -0,0 +1,37 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +msgid "" +msgstr "" +"Project-Id-Version: 0\n" +"POT-Creation-Date: 2012-08-04T04:05:40\n" +"PO-Revision-Date: 2012-08-04T04:05:40\n" +"Last-Translator: Automatically generated\n" +"Language-Team: None\n" +"MIME-Version: 1.0\n" +"Content-Type: application/x-publican; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#. Tag: firstname +#, no-c-format +msgid "Apache" +msgstr "" + +#. Tag: surname +#, no-c-format +msgid "CloudStack" +msgstr "" + diff --git a/docs/runbook/zh-CN/Book_Info.po b/docs/runbook/zh-CN/Book_Info.po new file mode 100644 index 00000000000..66481967b13 --- /dev/null +++ b/docs/runbook/zh-CN/Book_Info.po @@ -0,0 +1,54 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +msgid "" +msgstr "" +"Project-Id-Version: Apache CloudStack Runbook\n" +"Report-Msgid-Bugs-To: http://bugs.cloudstack.org/\n" +"POT-Creation-Date: 2012-07-10T15:13:22\n" +"PO-Revision-Date: 2012-08-03 06:28+0000\n" +"Last-Translator: micexia \n" +"Language-Team: None\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: zh_CN\n" +"Plural-Forms: nplurals=1; plural=0\n" + +#. Tag: title +#, no-c-format +msgid "Runbook" +msgstr "运行手册" + +#. Tag: subtitle +#, no-c-format +msgid "Prescriptive instructions for deploying Apache CloudStack" +msgstr "部署Apache CloudStack规范指导" + +#. Tag: productname +#, no-c-format +msgid "Apache CloudStack" +msgstr "Apache CloudStack" + +#. Tag: para +#, no-c-format +msgid "" +"These runbooks are designed to provide a strict environment to guarantee a " +"higher degree of success in initial deployments of Apache CloudStack. All of" +" the elements of the environment will be provided to you. Apache CloudStack " +"is capable of much more complex configurations, but they are beyond the " +"scope of this document." +msgstr "本安装手册用于提供在严格指定的环境中安装Apache CloudStack,以保证初始部署的高成功率,所有相关的资源和环境都会提供给您。Apache CloudStack的配置可以非常复杂,但这超出了本文档的范围。" diff --git a/docs/runbook/zh-CN/Chapter.po b/docs/runbook/zh-CN/Chapter.po new file mode 100644 index 00000000000..2d9cfae3cb6 --- /dev/null +++ b/docs/runbook/zh-CN/Chapter.po @@ -0,0 +1,65 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +# AUTHOR , YEAR. +# +msgid "" +msgstr "" +"Project-Id-Version: 0\n" +"POT-Creation-Date: 2012-08-04T04:05:40\n" +"PO-Revision-Date: 2012-08-04T04:05:40\n" +"Last-Translator: Automatically generated\n" +"Language-Team: None\n" +"MIME-Version: 1.0\n" +"Content-Type: application/x-publican; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#. Tag: title +#, no-c-format +msgid "Test Chapter" +msgstr "" + +#. Tag: para +#, no-c-format +msgid "This is a test paragraph" +msgstr "" + +#. Tag: title +#, no-c-format +msgid "Test Section 1" +msgstr "" + +#. Tag: para +#, no-c-format +msgid "This is a test paragraph in a section" +msgstr "" + +#. Tag: title +#, no-c-format +msgid "Test Section 2" +msgstr "" + +#. Tag: para +#, no-c-format +msgid "This is a test paragraph in Section 2" +msgstr "" + +#. Tag: para +#, no-c-format +msgid "listitem text" +msgstr "" + diff --git a/docs/runbook/zh-CN/Environment.po b/docs/runbook/zh-CN/Environment.po new file mode 100644 index 00000000000..63bb53549ae --- /dev/null +++ b/docs/runbook/zh-CN/Environment.po @@ -0,0 +1,494 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +msgid "" +msgstr "" +"Project-Id-Version: Apache CloudStack Runbook\n" +"Report-Msgid-Bugs-To: http://bugs.cloudstack.org/\n" +"POT-Creation-Date: 2012-07-10T15:13:22\n" +"PO-Revision-Date: 2012-08-03 05:51+0000\n" +"Last-Translator: micexia \n" +"Language-Team: None\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: zh_CN\n" +"Plural-Forms: nplurals=1; plural=0\n" + +#. Tag: title +#, no-c-format +msgid "Environment" +msgstr "环境准备" + +#. Tag: para +#, no-c-format +msgid "" +"Before you begin, you need to prepare the environment before you install " +"CloudStack. We will go over the steps to prepare now." +msgstr "在开始安装Cloudstack之前,需要准备环境,以下将详细分步骤描述各准备环节。" + +#. Tag: title +#, no-c-format +msgid "Operating System" +msgstr "操作系统" + +#. Tag: para +#, no-c-format +msgid "" +"Using the CentOS 6.2 x86_64 minimal install ISO, you'll need to install " +"CentOS on your hardware. The defaults will generally be acceptable for this " +"installation." +msgstr "使用 CentOS 6.2 x86_64 minimal install 镜像,在物理主机上安装CentOS,安装过程中接受默认选项。" + +#. Tag: para +#, no-c-format +msgid "" +"Once this installation is complete, you'll want to connect to your freshly " +"installed machine via SSH as the root user. Note that you should not allow " +"root logins in a production environment, so be sure to turn off remote " +"logins once you have finished the installation and configuration." +msgstr "当安装完成后,需要以root身份通过SSH连接新安装的主机,注意不要以root账户登录生产环境,请在完成安装和配置后关闭远程登录。" + +#. Tag: title +#, no-c-format +msgid "Configuring the network" +msgstr "配置网络" + +#. Tag: para +#, no-c-format +msgid "" +"By default the network will not come up on your hardware and you will need " +"to configure it to work in your environment. Since we specified that there " +"will be no DHCP server in this environment we will be manually configuring " +"your network interface. We will assume, for the purposes of this exercise, " +"that eth0 is the only network interface that will be connected and used." +msgstr "一般情况下网络不会在新安装的主机上启用,您需要根据环境进行配置。由于网络中不能存在任何DHCP服务器,您需要手工配置网络接口。为了快速简化安装的目的,这里假定主机上只有eth0一个网络接口。" + +#. Tag: para +#, no-c-format +msgid "" +"Connecting via the console you should login as root. Check the file " +"/etc/sysconfig/network-scripts/ifcfg-eth0, it will look" +" like this by default:" +msgstr "以root身份连接主机控制台,检查文件 /etc/sysconfig/network-scripts/ifcfg-eth0,默认情况,其内容如下所示:" + +#. Tag: screen +#, no-c-format +msgid "" +"\n" +"DEVICE=\"eth0\"\n" +"HWADDR=\"52:54:00:B9:A6:C0\"\n" +"NM_CONTROLLED=\"yes\"\n" +"ONBOOT=\"no\"\n" +" " +msgstr "\nDEVICE=\"eth0\"\nHWADDR=\"52:54:00:B9:A6:C0\"\nNM_CONTROLLED=\"yes\"\nONBOOT=\"no\"\n " + +#. 
Tag: para +#, no-c-format +msgid "" +"Unfortunately, this configuration will not permit you to connect to the " +"network, and is also unsuitable for our purposes with CloudStack. We want to" +" configure that file so that it specifies the IP address, netmask, etc., as " +"shown in the following example:" +msgstr "但是根据以上配置您无法连接到网络,对于Cloudstack也同样不适合;您需修改配置文件,指定IP地址,网络掩码等信息,如下例所示:" + +#. Tag: screen +#, no-c-format +msgid "" +"\n" +"DEVICE=eth0\n" +"HWADDR=52:54:00:B9:A6:C0\n" +"NM_CONTROLLED=no\n" +"ONBOOT=yes\n" +"BOOTPROTO=none\n" +"IPADDR=172.16.10.2\n" +"NETMASK=255.255.255.0\n" +"GATEWAY=172.16.10.1\n" +" " +msgstr "\nDEVICE=eth0\nHWADDR=52:54:00:B9:A6:C0\nNM_CONTROLLED=no\nONBOOT=yes\nBOOTPROTO=none\nIPADDR=172.16.10.2\nNETMASK=255.255.255.0\nGATEWAY=172.16.10.1\n " + +#. Tag: title +#, no-c-format +msgid "IP Addressing" +msgstr "IP地址" + +#. Tag: para +#, no-c-format +msgid "" +"Throughout this document we are assuming that you will have a /24 network " +"for your CloudStack implementation. This can be any RFC 1918 network. " +"However, we are assuming that you will match the machine address that we are" +" using. Thus we may use " +"172.16.10.2 and because " +"you might be using the 192.168.55.0/24 network you would use " +"192.168.55.2" +msgstr "本文档假定您提供一个C类网络供Cloudstack使用;该网络可以是任何RFC 1918兼容的网络,但这里假定您使用的IP地址的最后一段与手册中使用的匹配,例如手册中使用172.16.10.2,如果您使用192.168.55.0/24,请使用192.168.55.2" + +#. Tag: title +#, no-c-format +msgid "Hardware Addresses" +msgstr "物理地址" + +#. Tag: para +#, no-c-format +msgid "" +"You should not use the hardware address (aka MAC address) from our example " +"for your configuration. It is network interface specific, so you should keep" +" the address already provided in the HWADDR directive." +msgstr "请不要使用网络配置例子中的MAC地址,该地址每个网卡唯一,请保留您配置文件中HWADDR段已提供的内容。" + +#. Tag: para +#, no-c-format +msgid "" +"Now that we have the configuration files properly set up, we need to run a " +"few commands to start up the network" +msgstr "配置文件准备完毕后,需要运行命令启动网络。" + +#. Tag: screen +#, no-c-format +msgid "" +"# chkconfig network " +"on" +msgstr "# chkconfig network on" + +#. Tag: screen +#, no-c-format +msgid "" +"# service network " +"start" +msgstr "# service network start" + +#. Tag: para +#, no-c-format +msgid "" +"This should bring the network up successfully, but we now need to enable " +"name resolution. To do that we will edit " +"/etc/resolv.conf. These instructions will add one of " +"the nameservers from Google, though you are free to add a local nameserver " +"if you wish. Your /etc/resolv.conf should modified to " +"look like:" +msgstr "以上命令应该会成功启用网络,接下来需要启用域名解析,编辑文件/etc/resolv.conf,以下指令将添加Google的DNS服务器,当然您也可以根据需要添加本地的域名服务器,/etc/resolv.conf 应更改为如下内容:" + +#. Tag: screen +#, no-c-format +msgid "" +"\n" +"nameserver 8.8.8.8\n" +" " +msgstr "\nnameserver 8.8.8.8\n " + +#. Tag: title +#, no-c-format +msgid "Hostname" +msgstr "主机名" + +#. Tag: para +#, no-c-format +msgid "" +"Cloudstack requires that the hostname be properly set. If you used the " +"default options in the installation, then your hostname is currently set to " +"localhost.localdomain. To test this we will run:" +msgstr "Cloudstack要求正确设置主机名,如果按照时您接受了默认选项,主机名为localhost.localdomain,输入如下命令可以进行验证" + +#. Tag: screen +#, no-c-format +msgid "# hostname --fqdn" +msgstr "# hostname --fqdn" + +#. Tag: para +#, no-c-format +msgid "At this point it will likely return:" +msgstr "此时应会返回:" + +#. Tag: screen +#, no-c-format +msgid "localhost" +msgstr "localhost" + +#. 
Tag: para +#, no-c-format +msgid "" +"To rectify this situation - we'll set the hostname by editing the " +"/etc/hosts file so that it follows a similar format to " +"this example:" +msgstr "为了纠正这个问题,需设置主机名,通过编辑/etc/hosts 文件,将其更改为类似如下内容:" + +#. Tag: screen +#, no-c-format +msgid "" +"\n" +"127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4\n" +"172.16.10.2 srvr1.cloud.priv\n" +msgstr "\n127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4\n172.16.10.2 srvr1.cloud.priv\n" + +#. Tag: para +#, no-c-format +msgid "" +"After you've modified that file, go ahead and restart the network using:" +msgstr "更改配置文件后,重启网络服务:" + +#. Tag: screen +#, no-c-format +msgid "" +"# service network " +"restart" +msgstr "# service network restart" + +#. Tag: para +#, no-c-format +msgid "" +"Now recheck with the hostname --fqdn command and ensure " +"that it returns a FQDN response" +msgstr "通过命令hostname --fqdn重新检查主机名,此时应返回一个FQDN格式结果。" + +#. Tag: title +#, no-c-format +msgid "SELinux" +msgstr "SELinux" + +#. Tag: para +#, no-c-format +msgid "" +"At the moment, for CloudStack to work properly SELinux must be set to " +"permissive. We want to both configure this for future boots and modify it in" +" the current running system." +msgstr "Cloudstack当前版本需要SELinux设置为permissive才能正常工作,你需要改变当前配置,同时将该配置持久化,使其在主机重启后仍然生效。" + +#. Tag: para +#, no-c-format +msgid "" +"To configure SELinux to be permissive in the running system we need to run " +"the following command:" +msgstr "将SELinux配置为permissive需执行如下命令:" + +#. Tag: screen +#, no-c-format +msgid "" +"# setenforce 0" +msgstr "# setenforce 0" + +#. Tag: para +#, no-c-format +msgid "" +"To ensure that it remains in that state we need to configure the file " +"/etc/selinux/config to reflect the permissive state, as" +" shown in this example:" +msgstr "为确保其持久生效需更改配置文件/etc/selinux/config,设置为permissive,如下例所示:" + +#. Tag: screen +#, no-c-format +msgid "" +"\n" +"\n" +"# This file controls the state of SELinux on the system.\n" +"# SELINUX= can take one of these three values:\n" +"# enforcing - SELinux security policy is enforced.\n" +"# permissive - SELinux prints warnings instead of enforcing.\n" +"# disabled - No SELinux policy is loaded.\n" +"SELINUX=permissive\n" +"# SELINUXTYPE= can take one of these two values:\n" +"# targeted - Targeted processes are protected,\n" +"# mls - Multi Level Security protection.\n" +"SELINUXTYPE=targeted\n" +" " +msgstr "\n\n# This file controls the state of SELinux on the system.\n# SELINUX= can take one of these three values:\n# enforcing - SELinux security policy is enforced.\n# permissive - SELinux prints warnings instead of enforcing.\n# disabled - No SELinux policy is loaded.\nSELINUX=permissive\n# SELINUXTYPE= can take one of these two values:\n# targeted - Targeted processes are protected,\n# mls - Multi Level Security protection.\nSELINUXTYPE=targeted\n " + +#. Tag: title +#, no-c-format +msgid "NTP" +msgstr "NTP" + +#. Tag: para +#, no-c-format +msgid "" +"NTP configuration is a necessity for keeping all of the clocks in your cloud" +" servers in sync. However, NTP is not installed by default. So we'll install" +" and and configure NTP at this stage. Installation is accomplished as " +"follows:" +msgstr "为了同步云平台中主机的时间,需要配置NTP,但NTP默认没有安装。因此需要先安装NTP,然后进行配置。通过以下命令进行安装:" + +#. Tag: screen +#, no-c-format +msgid "" +"# yum install ntp" +msgstr "# yum install ntp" + +#. 
Tag: para +#, no-c-format +msgid "" +"The actual default configuration is fine for our purposes, so we merely need" +" to enable it and set it to start on boot as follows:" +msgstr "使用实际的默认配置文件即可满足本文档的要求,仅需启用NTP并设置为开机启动,如下所示:" + +#. Tag: screen +#, no-c-format +msgid "" +"# chkconfig ntpd " +"on" +msgstr "# chkconfig ntpd on" + +#. Tag: screen +#, no-c-format +msgid "" +"# service ntpd " +"start" +msgstr "# service ntpd start" + +#. Tag: title +#, no-c-format +msgid "NFS" +msgstr "NFS" + +#. Tag: para +#, no-c-format +msgid "" +"Our configuration is going to use NFS for both primary and secondary " +"storage. We are going to go ahead and setup two NFS shares for those " +"purposes. We'll start out by installing nfs-" +"utils." +msgstr "本文档将配置的环境使用NFS做为主存储和次要存储,需配置两个NFS共享目挂载点,在此之前需先安装nfs-utils:" + +#. Tag: screen +#, no-c-format +msgid "" +"# yum install nfs-" +"utils" +msgstr "# yum install nfs-utils" + +#. Tag: para +#, no-c-format +msgid "" +"We now need to configure NFS to serve up two different shares. This is " +"handled comparatively easily in the /etc/exports file. " +"You should ensure that it has the following content:" +msgstr "接下来需配置NFS提供两个不同的挂载点,通过编辑/etc/exports文件即可简单实现,请确保其内容如下所示:" + +#. Tag: screen +#, no-c-format +msgid "" +"\n" +"/secondary *(rw,async,no_root_squash)\n" +"/primary *(rw,async,no_root_squash)\n" +" " +msgstr "\n/secondary *(rw,async,no_root_squash)\n/primary *(rw,async,no_root_squash)\n " + +#. Tag: para +#, no-c-format +msgid "" +"You will note that we specified two directories that don't exist (yet) on " +"the system. We'll go ahead and create those directories and set permissions " +"appropriately on them with the following commands:" +msgstr "注意配置文件中指定了两个系统中不存在的目录,下面需要创建这些目录并且设置合适的权限,对应的命令如下所示:" + +#. Tag: screen +#, no-c-format +msgid "" +"\n" +"# mkdir /primary\n" +"# mkdir /secondary\n" +"# chmod 777 /primary\n" +"# chmod 777 /secondary\n" +" " +msgstr "\n# mkdir /primary\n# mkdir /secondary\n# chmod 777 /primary\n# chmod 777 /secondary\n " + +#. Tag: para +#, no-c-format +msgid "" +"CentOS 6.x releases use NFSv4 by default. NFSv4 requires that domain setting" +" matches on all clients. In our case, the domain is cloud.priv, so ensure " +"that the domain setting in /etc/idmapd.conf is " +"uncommented and set as follows:" +msgstr "CentOS 6.x 版本默认使用NFSv4,NFSv4要求所有客户端的域设置匹配,这里设置为cloud.priv为例,请确保文件/etc/idmapd.conf中的域设置没有被注释掉,并设置为以下内容:" + +#. Tag: screen +#, no-c-format +msgid "Domain = cloud.priv" +msgstr "Domain = cloud.priv" + +#. Tag: para +#, no-c-format +msgid "" +"Now you'll need uncomment the configuration values in the file " +"/etc/sysconfig/nfs" +msgstr "然后您需要取消/etc/sysconfig/nfs文件中以下配置项的注释:" + +#. Tag: screen +#, no-c-format +msgid "" +"\n" +"LOCKD_TCPPORT=32803\n" +"LOCKD_UDPPORT=32769\n" +"MOUNTD_PORT=892\n" +"RQUOTAD_PORT=875\n" +"STATD_PORT=662\n" +"STATD_OUTGOING_PORT=2020\n" +" " +msgstr "\nLOCKD_TCPPORT=32803\nLOCKD_UDPPORT=32769\nMOUNTD_PORT=892\nRQUOTAD_PORT=875\nSTATD_PORT=662\nSTATD_OUTGOING_PORT=2020\n " + +#. Tag: para +#, no-c-format +msgid "" +"Now we need to configure the firewall to permit incoming NFS connections. " +"Edit the file /etc/sysconfig/iptables" +msgstr "接下来还需配置防火墙,允许NFS连接。编辑文件/etc/sysconfig/iptables:" + +#. 
Tag: screen +#, no-c-format +msgid "" +"\n" +"-A INPUT -m state --state NEW -p udp --dport 111 -j ACCEPT\n" +"-A INPUT -m state --state NEW -p tcp --dport 111 -j ACCEPT\n" +"-A INPUT -m state --state NEW -p tcp --dport 2049 -j ACCEPT\n" +"-A INPUT -m state --state NEW -p tcp --dport 32803 -j ACCEPT\n" +"-A INPUT -m state --state NEW -p udp --dport 32769 -j ACCEPT\n" +"-A INPUT -m state --state NEW -p tcp --dport 892 -j ACCEPT\n" +"-A INPUT -m state --state NEW -p udp --dport 892 -j ACCEPT\n" +"-A INPUT -m state --state NEW -p tcp --dport 875 -j ACCEPT\n" +"-A INPUT -m state --state NEW -p udp --dport 875 -j ACCEPT\n" +"-A INPUT -m state --state NEW -p tcp --dport 662 -j ACCEPT\n" +"-A INPUT -m state --state NEW -p udp --dport 662 -j ACCEPT\n" +" " +msgstr "\n-A INPUT -m state --state NEW -p udp --dport 111 -j ACCEPT\n-A INPUT -m state --state NEW -p tcp --dport 111 -j ACCEPT\n-A INPUT -m state --state NEW -p tcp --dport 2049 -j ACCEPT\n-A INPUT -m state --state NEW -p tcp --dport 32803 -j ACCEPT\n-A INPUT -m state --state NEW -p udp --dport 32769 -j ACCEPT\n-A INPUT -m state --state NEW -p tcp --dport 892 -j ACCEPT\n-A INPUT -m state --state NEW -p udp --dport 892 -j ACCEPT\n-A INPUT -m state --state NEW -p tcp --dport 875 -j ACCEPT\n-A INPUT -m state --state NEW -p udp --dport 875 -j ACCEPT\n-A INPUT -m state --state NEW -p tcp --dport 662 -j ACCEPT\n-A INPUT -m state --state NEW -p udp --dport 662 -j ACCEPT\n " + +#. Tag: para +#, no-c-format +msgid "Now you can restart the iptables service with the following command:" +msgstr "通过以下命令重新启动iptables服务:" + +#. Tag: screen +#, no-c-format +msgid "" +"# service iptables " +"restart" +msgstr "# service iptables restart" + +#. Tag: para +#, no-c-format +msgid "" +"We now need to configure nfs service to start on boot and actually start it " +"on the host by executing the following commands:" +msgstr "最后需要配置NFS服务为开机自启动:" + +#. Tag: screen +#, no-c-format +msgid "" +"\n" +" # service rpcbind start\n" +" # service nfs start\n" +" # chkconfig rpcbind on\n" +" # chkconfig nfs on\n" +" " +msgstr "\n # service rpcbind start\n # service nfs start\n # chkconfig rpcbind on\n # chkconfig nfs on\n " diff --git a/docs/runbook/zh-CN/Management.po b/docs/runbook/zh-CN/Management.po new file mode 100644 index 00000000000..63fc1497104 --- /dev/null +++ b/docs/runbook/zh-CN/Management.po @@ -0,0 +1,235 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+msgid "" +msgstr "" +"Project-Id-Version: Apache CloudStack Runbook\n" +"Report-Msgid-Bugs-To: http://bugs.cloudstack.org/\n" +"POT-Creation-Date: 2012-07-10T15:13:22\n" +"PO-Revision-Date: 2012-08-03 06:17+0000\n" +"Last-Translator: micexia \n" +"Language-Team: None\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: zh_CN\n" +"Plural-Forms: nplurals=1; plural=0\n" + +#. Tag: title +#, no-c-format +msgid "Installation of the management server" +msgstr "安装管理服务器" + +#. Tag: para +#, no-c-format +msgid "" +"Now it is time to start installing CloudStack's management server and some " +"of the related components." +msgstr "现在需要安装CloudStack管理服务器和相关的组件。" + +#. Tag: title +#, no-c-format +msgid "Database Installation and Configuration" +msgstr "数据库安装和配置" + +#. Tag: para +#, no-c-format +msgid "" +"We'll start out by installing MySQL and " +"configuring some options to ensure CloudStack runs well." +msgstr "首先安装MySQL,并对它进行配置,以确保CloudStack运行正常。" + +#. Tag: para +#, no-c-format +msgid "To install MySQL run the following command:" +msgstr "运行以下命令,安装MySQL :" + +#. Tag: screen +#, no-c-format +msgid "" +"# yum -y install mysql-" +"server" +msgstr "# yum -y install mysql-server" + +#. Tag: para +#, no-c-format +msgid "" +"With MySQL installed we need to make a few " +"configuration changes to /etc/my.cnf. Specifically we " +"need to add the following options to the [mysqld] section:" +msgstr "MySQL安装完成后,需更改其配置文件/etc/my.cnf,在[mysqld]下添加如下内容:" + +#. Tag: screen +#, no-c-format +msgid "" +"\n" +"innodb_rollback_on_timeout=1\n" +"innodb_lock_wait_timeout=600\n" +"max_connections=350\n" +"log-bin=mysql-bin\n" +"binlog-format = 'ROW' \n" +" " +msgstr "\ninnodb_rollback_on_timeout=1\ninnodb_lock_wait_timeout=600\nmax_connections=350\nlog-bin=mysql-bin\nbinlog-format = 'ROW' \n " + +#. Tag: para +#, no-c-format +msgid "" +"Now that MySQL is properly configured we can " +"start it and configure it to start on boot as follows:" +msgstr "配置MySQL完成后,启动它并配置为开机自启动:" + +#. Tag: screen +#, no-c-format +msgid "" +"\n" +" # service mysqld start\n" +" # chkconfig mysqld on\n" +" " +msgstr "\n # service mysqld start\n # chkconfig mysqld on\n " + +#. Tag: title +#, no-c-format +msgid "Extraction" +msgstr "解压安装包" + +#. Tag: para +#, no-c-format +msgid "" +"The next step is to extract the contents of the CloudStack tarball " +"(mentioned in ) you " +"downloaded previously. To extract the contents of this tarball use the " +"following command:" +msgstr "下一步是解压缩之前下载的CloudStack安装包(见),通过以下命令进行解压缩:" + +#. Tag: screen +#, no-c-format +msgid "" +"\n" +" # tar -xzvf CloudStack-oss-3.0.2-1-rhel6.2.tar.gz\n" +" " +msgstr "\n # tar -xzvf CloudStack-oss-3.0.2-1-rhel6.2.tar.gz\n " + +#. Tag: para +#, no-c-format +msgid "" +"For the next few sections you'll need to cd into the " +"first level that was just created." +msgstr "以下章节需要您cd 进入刚创建的目录中" + +#. Tag: title +#, no-c-format +msgid "Installation" +msgstr "安装" + +#. Tag: para +#, no-c-format +msgid "" +"Now that you are in the directory created by extracting the tarball, it's " +"now time to install. We'll run ./install.sh and choose " +"option . This will install the management server and " +"necessary dependencies." +msgstr "当您进入解压缩后创建的目录中,下一步是安装。执行./install.sh并选择选项后,管理服务器和相关依赖会自动安装。" + +#. Tag: para +#, no-c-format +msgid "" +"With the application itself installed we can now setup the database, we'll " +"do that with the following command and options:" +msgstr "平台系统本身安装后,需初始化数据库,通过以下命令和选项完成:" + +#. 
Tag: screen +#, no-c-format +msgid "" +"\n" +" # cloud-setup-databases cloud:password@localhost --deploy-as=root\n" +" " +msgstr "\n # cloud-setup-databases cloud:password@localhost --deploy-as=root\n " + +#. Tag: para +#, no-c-format +msgid "" +"When this process is finished, you should see a message like \"CloudStack " +"has successfully initialized the database.\"" +msgstr "当该过程结束后,您应该可以看到类似信息:\"CloudStack has successfully initialized the database.\"" + +#. Tag: para +#, no-c-format +msgid "" +"Now that the database has been created, we can take the final step in " +"setting up the management server by issuing the following command:" +msgstr "数据库创建后,最后一步是配置管理服务器,通过如下命令执行:" + +#. Tag: screen +#, no-c-format +msgid "" +"\n" +" # cloud-setup-mangament\n" +" " +msgstr "\n # cloud-setup-mangament\n " + +#. Tag: title +#, no-c-format +msgid "System Template Setup" +msgstr "系统模板配置" + +#. Tag: para +#, no-c-format +msgid "" +"CloudStack uses a number of system VMs to provide functionality for " +"accessing the console of virtual machines, providing various networking " +"services, and managing various aspects of storage. This step will acquire " +"those system images ready for deployment when we bootstrap your cloud." +msgstr "CloudStack通过一系列系统虚拟机提供功能,如访问虚拟机控制台,如提供各类网络服务,以及管理次要存储的中的各类资源。该步骤会获取系统虚拟机模板,用于云平台引导后系统虚拟机的部署。" + +#. Tag: para +#, no-c-format +msgid "" +"The place we are going to download these images to is the secondary storage " +"share that we setup earlier, so we'll need to mount that share with the " +"mount command run on the management server:" +msgstr "模板下载后存放的位置是之前配置的次要存储目录,需先使用mount 命令挂载二级存储,在管理服务器上运行如下命令:" + +#. Tag: screen +#, no-c-format +msgid "" +"\n" +" # mount -t nfs 172.16.10.2:/secondary /mnt/secondary\n" +" " +msgstr "\n # mount -t nfs 172.16.10.2:/secondary /mnt/secondary\n " + +#. Tag: para +#, no-c-format +msgid "" +"Now we need to download the system VM template and deploy that to the share " +"we just mounted. The management server includes a script to properly " +"manipulate the system VMs images." +msgstr "然后需要下载系统虚拟机模板,并把这些模板部署于刚才创建的次要存储中;管理服务器包含一个脚本可以正确的操作系统虚拟机模板:" + +#. Tag: screen +#, no-c-format +msgid "" +"\n" +" # /usr/lib64/cloud/agent/scripts/storage/secondary/cloud-install-sys-tmplt -m /mnt/secondary -u http://download.cloud.com/templates/acton/acton-systemvm-02062012.qcow2.bz2 -h kvm -F\n" +" " +msgstr "\n # /usr/lib64/cloud/agent/scripts/storage/secondary/cloud-install-sys-tmplt -m /mnt/secondary -u http://download.cloud.com/templates/acton/acton-systemvm-02062012.qcow2.bz2 -h kvm -F\n " + +#. Tag: para +#, no-c-format +msgid "" +"That concludes our setup of the management server. We still need to " +"configure CloudStack, but we will do that after we get our hypervisor set " +"up." +msgstr "以上是管理服务器的安装和配置过程;在配置CloudStack之前,需启用hypervisor" diff --git a/docs/runbook/zh-CN/Overview.po b/docs/runbook/zh-CN/Overview.po new file mode 100644 index 00000000000..1bfd88f8645 --- /dev/null +++ b/docs/runbook/zh-CN/Overview.po @@ -0,0 +1,130 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +msgid "" +msgstr "" +"Project-Id-Version: Apache CloudStack Runbook\n" +"Report-Msgid-Bugs-To: http://bugs.cloudstack.org/\n" +"POT-Creation-Date: 2012-07-10T15:13:22\n" +"PO-Revision-Date: 2012-08-03 09:27+0000\n" +"Last-Translator: micexia \n" +"Language-Team: None\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: zh_CN\n" +"Plural-Forms: nplurals=1; plural=0\n" + +#. Tag: title +#, no-c-format +msgid "Overview" +msgstr "概述" + +#. Tag: para +#, no-c-format +msgid "" +"Infrastructure-as-a-Service (IaaS) clouds can be a complex thing to build, " +"and by definition they have a plethora of options, which often lead to " +"confusion for even experienced admins who are newcomers to building cloud " +"platforms. The goal for this runbook is to provide a straightforward set of " +"instructions to get you up and running with CloudStack with a minimum amount" +" of trouble." +msgstr "搭建基础设施即服务 (Infrastructure-as-a-Service, IaaS)云平台是一件复杂的工作,它提供了过于多的选项,以至于甚至是经验丰富的管理员在刚开始搭建云平台时也会感到困惑。该操作手册的目标是提供一系列直观的说明以帮助您搭建并运行一个最简单的Cloudstack环境。" + +#. Tag: title +#, no-c-format +msgid "What exactly are we building?" +msgstr "目标部署环境" + +#. Tag: para +#, no-c-format +msgid "" +"This runbook will focus on building a CloudStack cloud using KVM with CentOS" +" 6.2 with NFS storage on a flat layer-2 network utilizing layer-3 network " +"isolation (aka Security Groups), and doing it all on a single piece of " +"hardware." +msgstr "该操作手册将重点介绍如何搭建如下Cloudstack云平台:\n使用CentOS 6.2 KVM\n部署于扁平二层网络\n使用三层网络隔离(安全组)\n所有资源集中于一台物理主机" + +#. Tag: para +#, no-c-format +msgid "" +"KVM, or Kernel-based Virtual Machine is a virtualization technology for the " +"Linux kernel. KVM supports native virtualization atop processors with " +"hardware virtualization extensions." +msgstr "KVM (Kernel-based Virtual Machine) 是一种针对LInux内核的虚拟化技术。KVM支持本地虚拟化,主机的CPU处理器需支持硬件虚拟化扩展。" + +#. Tag: para +#, no-c-format +msgid "" +"Security Groups act as distributed firewalls that control access to a group " +"of virtual machines." +msgstr "安全组起到类似分布式防火墙的作用,它可以对一组虚拟机进行访问控制。" + +#. Tag: title +#, no-c-format +msgid "High level overview of the process" +msgstr "整体过程概述" + +#. Tag: para +#, no-c-format +msgid "" +"Before we actually get to installing CloudStack, we'll start with installing" +" our base operating system, and then configuring that to act as an NFS " +"server for several types of storage. We'll install the management server, " +"download the systemVMs, and finally install the agent software. Finally " +"we'll spend a good deal of time configuring the entire cloud in the " +"CloudStack web interface." +msgstr "在实际安装Cloudstack之前,需要先安装基本的操作系统,并将其配置为一台NFS服务器用于提供各类存储资源。接下来的步骤是安装管理服务器,下载系统虚拟机模板,安装agnet;最后将描述如何使用Cloudstack Web界面配置整个云平台。" + +#. Tag: title +#, no-c-format +msgid "Prerequisites" +msgstr "先决条件" + +#. Tag: para +#, no-c-format +msgid "To complete this runbook you'll need the following items:" +msgstr "完成此操作手册您需要以下资源:" + +#. Tag: para +#, no-c-format +msgid "At least one computer which supports hardware virtualization." +msgstr "至少一台支持硬件虚拟化的主机" + +#. 
Tag: para +#, no-c-format +msgid "" +"The " +" CentOS 6.2 x86_64 minimal install CD " +msgstr " CentOS 6.2 x86_64 minimal install CD " + +#. Tag: para +#, no-c-format +msgid "" +"A /24 network with the gateway being at xxx.xxx.xxx.1, no DHCP should be on " +"this network and none of the computers running CloudStack may have a dynamic" +" address." +msgstr "一个C类网络,网关为 xxx.xxx.xxx.1,网络中不能存在DHCP服务器,所有运行Cloudstack的主机需使用静态IP地址。" + +#. Tag: para +#, no-c-format +msgid "" +"Copy of CloudStack 3.0.2 for RHEL" +" and CentOS 6.2 " +msgstr "安装包 CloudStack 3.0.2 for RHEL and CentOS 6.2 " diff --git a/docs/runbook/zh-CN/Preface.po b/docs/runbook/zh-CN/Preface.po new file mode 100644 index 00000000000..43f8b31f5a2 --- /dev/null +++ b/docs/runbook/zh-CN/Preface.po @@ -0,0 +1,34 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +msgid "" +msgstr "" +"Project-Id-Version: Apache CloudStack Runbook\n" +"Report-Msgid-Bugs-To: http://bugs.cloudstack.org/\n" +"POT-Creation-Date: 2012-07-10T15:13:22\n" +"PO-Revision-Date: 2012-08-03 04:53+0000\n" +"Last-Translator: micexia \n" +"Language-Team: None\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: zh_CN\n" +"Plural-Forms: nplurals=1; plural=0\n" + +#. Tag: title +#, no-c-format +msgid "Preface" +msgstr "前言" diff --git a/docs/runbook/zh-CN/Revision_History.po b/docs/runbook/zh-CN/Revision_History.po new file mode 100644 index 00000000000..7b8b91da94b --- /dev/null +++ b/docs/runbook/zh-CN/Revision_History.po @@ -0,0 +1,39 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +msgid "" +msgstr "" +"Project-Id-Version: Apache CloudStack Runbook\n" +"Report-Msgid-Bugs-To: http://bugs.cloudstack.org/\n" +"POT-Creation-Date: 2012-07-10T15:13:22\n" +"PO-Revision-Date: 2012-08-03 04:52+0000\n" +"Last-Translator: micexia \n" +"Language-Team: None\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: zh_CN\n" +"Plural-Forms: nplurals=1; plural=0\n" + +#. 
Tag: title +#, no-c-format +msgid "Revision History" +msgstr "修订历史" + +#. Tag: member +#, no-c-format +msgid "Initial creation of book by publican" +msgstr "最初创建版本" diff --git a/docs/runbook/zh-CN/Runbook.po b/docs/runbook/zh-CN/Runbook.po new file mode 100644 index 00000000000..2eee6c1bc90 --- /dev/null +++ b/docs/runbook/zh-CN/Runbook.po @@ -0,0 +1,27 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License.# +msgid "" +msgstr "" +"Project-Id-Version: 0\n" +"POT-Creation-Date: 2012-08-04T04:05:40\n" +"PO-Revision-Date: 2012-08-04T04:05:40\n" +"Last-Translator: Automatically generated\n" +"Language-Team: None\n" +"MIME-Version: 1.0\n" +"Content-Type: application/x-publican; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + diff --git a/docs/runbook/zh-CN/config.po b/docs/runbook/zh-CN/config.po new file mode 100644 index 00000000000..507b635328e --- /dev/null +++ b/docs/runbook/zh-CN/config.po @@ -0,0 +1,280 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +msgid "" +msgstr "" +"Project-Id-Version: Apache CloudStack Runbook\n" +"Report-Msgid-Bugs-To: http://bugs.cloudstack.org/\n" +"POT-Creation-Date: 2012-08-06T15:22:17\n" +"PO-Revision-Date: 2012-08-07 04:40+0000\n" +"Last-Translator: micexia \n" +"Language-Team: None\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: zh_CN\n" +"Plural-Forms: nplurals=1; plural=0\n" + +#. Tag: title +#, no-c-format +msgid "Configuration" +msgstr "云平台配置" + +#. Tag: para +#, no-c-format +msgid "" +"As we noted before we will be using security groups to provide isolation and" +" by default that implies that we'll be using a flat layer-2 network. It also" +" means that the simplicity of our setup means that we can use the quick " +"installer." +msgstr "如上文所述,该手册所描述的环境将使用安全组提供网络隔离,这意味着您的安装环境仅需要一个扁平的二层网络,同样意味着较为简单的配置和快速的安装。" + +#. Tag: title +#, no-c-format +msgid "UI Access" +msgstr "访问用户界面" + +#. 
Tag: para +#, no-c-format +msgid "" +"To get access to CloudStack's web interface, merely point your browser to " +"http://172.16.10.2:8080/client The default " +"username is 'admin', and the default password is 'password'. You should see " +"a splash screen that allows you to choose several options for setting up " +"CloudStack. You should choose the " +" option." +msgstr "访问CloudStack用户界面,仅需通过浏览器访问: http://172.16.10.2:8080/client 默认的用户名为”admin“,默认密码是“password”。第一次登录后可以看到欢迎界面,该界面提供两个配置CloudStack的选项,请选择“继续执行基本安装”。" + +#. Tag: para +#, no-c-format +msgid "" +"You should now see a prompt requiring you to change the password for the " +"admin user. Please do so." +msgstr "此时您会看到提示,要求为admin用户更改密码,请更新密码后继续。" + +#. Tag: title +#, no-c-format +msgid "Setting up a Zone" +msgstr "配置区域" + +#. Tag: para +#, no-c-format +msgid "" +"A zone is the largest organization entity in CloudStack - and we'll be " +"creating one, this should be the screen that you see in front of you now. " +"And for us there are 5 pieces of information that we need." +msgstr "区域是CloudStack平台中最大的组织单位,下面将会讲述如何创建一个区域;此时屏幕中显示的是区域添加页面,这里需要您提供以下5项信息:" + +#. Tag: para +#, no-c-format +msgid "Name - we will set this to the ever-descriptive 'Zone1' for our cloud." +msgstr "名称 - 提供描述性的名称,这里以\"Zone1\"为例" + +#. Tag: para +#, no-c-format +msgid "Public DNS 1 - we will set this to '8.8.8.8' for our cloud." +msgstr "DNS1 - 设置为 8.8.8.8" + +#. Tag: para +#, no-c-format +msgid "Public DNS 2 - we will set this to '8.8.4.4' for our cloud." +msgstr "DNS2 - 设置 为8.8.4.4" + +#. Tag: para +#, no-c-format +msgid "Internal DNS1 - we will also set this to '8.8.8.8' for our cloud." +msgstr "内部DNS1 - 同样设置为 8.8.8.8" + +#. Tag: para +#, no-c-format +msgid "Internal DNS2 - we will also set this to '8.8.8.4' for our cloud." +msgstr "内部DNS2 - 同样设置为 8.8.4.4" + +#. Tag: title +#, no-c-format +msgid "Notes about DNS settings" +msgstr "关于DNS设置" + +#. Tag: para +#, no-c-format +msgid "" +"CloudStack distinguishes between internal and public DNS. Internal DNS is " +"assumed to be capable of resolving internal-only hostnames, such as your NFS" +" server’s DNS name. Public DNS is provided to the guest VMs to resolve " +"public IP addresses. You can enter the same DNS server for both types, but " +"if you do so, you must make sure that both internal and public IP addresses " +"can route to the DNS server. In our specific case we will not use any names " +"for resources internally, and we have indeed them set to look to the same " +"external resource so as to not add a namerserver setup to our list of " +"requirements." +msgstr "CloudStack区分内部和外部DNS。内部DNS用于解析仅内部使用的主机名,例如NFS服务器的主机名;外部DNS用于为用户虚拟机提供外网IP地址解析。您可以为以上两种类型输入相同的DNS服务器,但必须确认内部和外网IP地址都存在到达该DNS服务器的路由。本手册描述的环境中,内部资源不使用主机名,因此这里将其设置为与外部DNS一致以简化安装,从而不必为此再安装一台DNS服务器。" + +#. Tag: title +#, no-c-format +msgid "Pod Configuration" +msgstr "配置提供点" + +#. Tag: para +#, no-c-format +msgid "" +"Now that we've added a Zone, the next step that comes up is a prompt for " +"information regading a pod. Which is looking for 4 items." +msgstr "到这里您已经添加了一个区域,下一步后会显示提供点的相关信息,以及添加提供点所需信息:" + +#. Tag: para +#, no-c-format +msgid "Name - We'll use Pod1 for our cloud." +msgstr "名称 - 这里填写“Pod1”为例" + +#. Tag: para +#, no-c-format +msgid "" +"Gateway - We'll use 172.16.10.1 as our gateway" +msgstr "网关 - 输入 172.16.10.1 " + +#. Tag: para +#, no-c-format +msgid "Netmask - We'll use 255.255.255.0" +msgstr "网络掩码 - 输入 255.255.255.0" + +#. 
Tag: para +#, no-c-format +msgid "Start/end reserved system IPs - we will use 172.16.10.10-172.16.10.20" +msgstr "IP范围 - 输入172.16.10.10-172.16.10.20为例" + +#. Tag: para +#, no-c-format +msgid "Guest gateway - We'll use 172.16.10.1" +msgstr "用户网关 - 使用 172.16.10.1 " + +#. Tag: para +#, no-c-format +msgid "Guest netmask - We'll use 255.255.255.0" +msgstr "用户掩码 - 输入 255.255.255.0" + +#. Tag: para +#, no-c-format +msgid "" +"Guest start/end IP - We'll use " +"172.16.10.30-172.16.10.200" +msgstr "IP范围 - 这里使用 172.16.10.30-172.16.10.200" + +#. Tag: title +#, no-c-format +msgid "Cluster" +msgstr "集群" + +#. Tag: para +#, no-c-format +msgid "" +"Now that we've added a Zone, we need only add a few more items for " +"configuring the cluster." +msgstr "添加区域和提供点之后,仅需提供以下信息以配置集群:" + +#. Tag: para +#, no-c-format +msgid "Name - We'll use Cluster1" +msgstr "名称 - 这里使用 Cluster1 为例" + +#. Tag: para +#, no-c-format +msgid "Hypervisor - Choose KVM" +msgstr "Hypervisor - 选择KVM" + +#. Tag: para +#, no-c-format +msgid "" +"You should be prompted to add the first host to your cluster at this point. " +"Only a few bits of information are needed." +msgstr "此时向导会提示您为集群添加第一台主机,需提供如下信息:" + +#. Tag: para +#, no-c-format +msgid "" +"Hostname - we'll use the IP address 172.16.10.2 " +"since we didn't set up a DNS server." +msgstr "主机名称 - 由于没有配置内部DNS服务,这里使用IP地址172.16.10.2" + +#. Tag: para +#, no-c-format +msgid "Username - we'll use 'root'" +msgstr "用户名 - 输入 root" + +#. Tag: para +#, no-c-format +msgid "Password - enter the operating system password for the root user" +msgstr "密码 - 输入操作系统中root用户的密码" + +#. Tag: title +#, no-c-format +msgid "Primary Storage" +msgstr "主存储" + +#. Tag: para +#, no-c-format +msgid "" +"With you cluster now setup - you should be prompted for primary storage " +"information. Choose NFS as the storage type and then enter the following " +"values in the fields:" +msgstr "集群配置过程中需提供主存储信息,存储类型选择NFS,并提供以下信息:" + +#. Tag: para +#, no-c-format +msgid "Name - We'll use 'Primary1'" +msgstr "名称 - 输入‘Primary1'" + +#. Tag: para +#, no-c-format +msgid "" +"Server - We'll be using the IP address " +"172.16.10.2" +msgstr "名称 - 这里输入IP地址172.16.10.2" + +#. Tag: para +#, no-c-format +msgid "Path - Well define /primary as the path we are using" +msgstr "路径 - 输入/primary" + +#. Tag: title +#, no-c-format +msgid "Secondary Storage" +msgstr "次要存储" + +#. Tag: para +#, no-c-format +msgid "" +"If this is a new zone, you'll be prompted for secondary storage information " +"- populate it as follows:" +msgstr "如果添加的区域是一个新的区域,您需提供次要存储相关信息:" + +#. Tag: para +#, no-c-format +msgid "" +"NFS server - We'll use the IP address 172.16.10.2" +msgstr "NFS服务器 - 输入IP地址 172.16.10.2" + +#. Tag: para +#, no-c-format +msgid "Path - We'll use /secondary" +msgstr "路径 - 输入 /secondary " + +#. Tag: para +#, no-c-format +msgid "" +"Now, click Launch and your cloud should begin setup - it may take several " +"minutes depending on your internet connection speed for setup to finalize." +msgstr "现在,点击“启动”然后您的云平台将开始配置,依赖于您实际的网络速度,配置过程可能耗时几分钟" diff --git a/docs/runbook/zh-CN/kvm.po b/docs/runbook/zh-CN/kvm.po new file mode 100644 index 00000000000..906f82eea55 --- /dev/null +++ b/docs/runbook/zh-CN/kvm.po @@ -0,0 +1,145 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +msgid "" +msgstr "" +"Project-Id-Version: Apache CloudStack Runbook\n" +"Report-Msgid-Bugs-To: http://bugs.cloudstack.org/\n" +"POT-Creation-Date: 2012-07-10T15:13:22\n" +"PO-Revision-Date: 2012-08-03 09:24+0000\n" +"Last-Translator: micexia \n" +"Language-Team: None\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: zh_CN\n" +"Plural-Forms: nplurals=1; plural=0\n" + +#. Tag: title +#, no-c-format +msgid "KVM Setup and installation" +msgstr "KVM配置和安装" + +#. Tag: para +#, no-c-format +msgid "" +"KVM is the hypervisor we'll be using - we will recover the initial setup " +"which has already been done on the hypervisor host and cover installation of" +" the agent software, you can use the same steps to add additional KVM nodes " +"to your CloudStack environment." +msgstr "本文档使用KVM作为hypervisor,下文将回顾最如何配置hypervisor主机,其中大部分配置工作已在配置管理节点时完成;接下来描述如何安装agent。您可以应用相同的步骤添加额外的KVM节点到CloudStack环境中。" + +#. Tag: title +#, no-c-format +msgid "Prerequisites" +msgstr "先决条件" + +#. Tag: para +#, no-c-format +msgid "" +"We explicitly are using the management server as a compute node as well, " +"which means that we have already performed many of the prerequisite steps " +"when setting up the management server, but we will list them here for " +"clarity. Those steps are:" +msgstr "本文档描述的环境使用管理服务器同时作为计算节点,这意味着很多先决步骤已经在搭建管理服务器时完成;但为了清晰起见,仍然列出相关步骤:" + +#. Tag: para +#, no-c-format +msgid "" +msgstr "" + +#. Tag: para +#, no-c-format +msgid "" +msgstr "" + +#. Tag: para +#, no-c-format +msgid "" +msgstr "" + +#. Tag: para +#, no-c-format +msgid "" +msgstr "" + +#. Tag: para +#, no-c-format +msgid "" +"You shouldn't need to do that for the management server, of course, but any " +"additional hosts will need for you to complete the above steps." +msgstr "您不需要在管理服务器上执行这些步骤,当然,如果您需要添加额外的主机以上步骤仍然需要执行。" + +#. Tag: title +#, no-c-format +msgid "Installation" +msgstr "安装" + +#. Tag: para +#, no-c-format +msgid "" +"You'll need to ensure that you are in the directory that was created when we" +" extracted the the tarball. " +msgstr "确认您已进入解压安装包后产生的目录。" + +#. Tag: para +#, no-c-format +msgid "" +"You'll be running ./install.sh again and this time " +"choosing which will install the software necessary for " +"managing a KVM node." +msgstr "再次运行./install.sh,这一次选择安装选项,这会安装管理KVM所需的相关软件包。" + +#. Tag: title +#, no-c-format +msgid "KVM Configuration" +msgstr "KVM配置" + +#. Tag: para +#, no-c-format +msgid "" +"KVM configuration is relatively simple at only a single item. We need to " +"edit the QEMU VNC configuration. This is done by editing " +"/etc/libvirt/qemu.conf and ensuring the following line " +"is present and uncommented." +msgstr "KVM的配置相对简单,仅需一项配置;编辑QEMU VNC配置文件/etc/libvirt/qemu.conf,并确保以下内容存在并且没有被注释掉。" + +#. Tag: screen +#, no-c-format +msgid "vnc_listen=0.0.0.0" +msgstr "vnc_listen=0.0.0.0" + +#. 
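Once libvirtd has been restarted (the next step below), the emulator path that the agent will use can be read straight from the host capabilities, the same way the patched LibvirtCapXMLParser further down in this diff does it. A rough sketch, assuming libvirt-java and the cloud agent classes are on the classpath; the class name is illustrative.

    import org.libvirt.Connect;
    import org.libvirt.LibvirtException;

    import com.cloud.hypervisor.kvm.resource.LibvirtCapXMLParser;

    // Prints the qemu emulator extracted from the local libvirtd capabilities XML
    // (with this patch, only the x86_64 KVM emulator is considered).
    public class EmulatorCheck {
        public static void main(String[] args) throws LibvirtException {
            Connect conn = new Connect("qemu:///system"); // same local connection the KVM agent uses
            try {
                LibvirtCapXMLParser parser = new LibvirtCapXMLParser();
                parser.parseCapabilitiesXML(conn.getCapabilities());
                System.out.println("emulator: " + parser.getEmulator());
            } finally {
                conn.close();
            }
        }
    }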
Tag: para +#, no-c-format +msgid "" +"You can now just restart the libvirt daemon by issuing the following " +"command:" +msgstr "此时您可以重启libvirt服务,通过以下命令:" + +#. Tag: screen +#, no-c-format +msgid "" +"# service libvirt " +"restart" +msgstr "# service libvirt restart" + +#. Tag: para +#, no-c-format +msgid "" +"That concludes our installation and configuration of KVM, and we'll now move" +" to using the CloudStack UI for the actual configuration of our cloud." +msgstr "以上内容是KVM的安装和配置,下面将介绍如何使用CloudStack用户界面配置云平台。" diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtCapXMLParser.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtCapXMLParser.java index 4c12d35ab0a..aa0bfe275ef 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtCapXMLParser.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtCapXMLParser.java @@ -35,6 +35,7 @@ public class LibvirtCapXMLParser extends LibvirtXMLParser { private boolean _osType = false; private boolean _domainTypeKVM = false; private boolean _emulatorFlag = false; + private boolean _archTypex86_64 = false; private final StringBuffer _emulator = new StringBuffer(); private final StringBuffer _capXML = new StringBuffer(); private static final Logger s_logger = Logger @@ -54,7 +55,8 @@ public class LibvirtCapXMLParser extends LibvirtXMLParser { _domainTypeKVM = false; } else if (qName.equalsIgnoreCase("emulator")) { _emulatorFlag = false; - + } else if (qName.equalsIgnoreCase("arch")) { + _archTypex86_64 = false; } else if (_host) { _capXML.append("<").append("/").append(qName).append(">"); } @@ -68,6 +70,7 @@ public class LibvirtCapXMLParser extends LibvirtXMLParser { } else if (_osType) { guestOsTypes.add(new String(ch, start, length)); } else if (_emulatorFlag) { + s_logger.debug("Found " + new String(ch, start, length) + " as a suiteable emulator"); _emulator.append(ch, start, length); } } @@ -83,6 +86,13 @@ public class LibvirtCapXMLParser extends LibvirtXMLParser { if (_guest) { _osType = true; } + } else if (qName.equalsIgnoreCase("arch")) { + for (int i = 0; i < attributes.getLength(); i++) { + if (attributes.getQName(i).equalsIgnoreCase("name") + && attributes.getValue(i).equalsIgnoreCase("x86_64")) { + _archTypex86_64 = true; + } + } } else if (qName.equalsIgnoreCase("domain")) { for (int i = 0; i < attributes.getLength(); i++) { if (attributes.getQName(i).equalsIgnoreCase("type") @@ -90,7 +100,7 @@ public class LibvirtCapXMLParser extends LibvirtXMLParser { _domainTypeKVM = true; } } - } else if (qName.equalsIgnoreCase("emulator") && _domainTypeKVM) { + } else if (qName.equalsIgnoreCase("emulator") && _domainTypeKVM && _archTypex86_64) { _emulatorFlag = true; _emulator.delete(0, _emulator.length()); } else if (_host) { diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index 7bff023b68b..1f831f9bb41 100755 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -3599,24 +3599,14 @@ public class LibvirtComputingResource extends ServerResourceBase implements } private String getHypervisorPath(Connect conn) { - File f = new File("/usr/bin/cloud-qemu-kvm"); - if (f.exists()) { - return "/usr/bin/cloud-qemu-kvm"; - } else { - f = new 
File("/usr/libexec/cloud-qemu-kvm"); - if (f.exists()) { - return "/usr/libexec/cloud-qemu-kvm"; - } - LibvirtCapXMLParser parser = new LibvirtCapXMLParser(); try { parser.parseCapabilitiesXML(conn.getCapabilities()); } catch (LibvirtException e) { - + s_logger.debug(e.getMessage()); } return parser.getEmulator(); } - } private String getGuestType(Connect conn, String vmName) { LibvirtDomainXMLParser parser = new LibvirtDomainXMLParser(); diff --git a/plugins/hypervisors/vmware/src/com/cloud/network/element/CiscoNexusVSMElement.java b/plugins/hypervisors/vmware/src/com/cloud/network/element/CiscoNexusVSMElement.java index 368ef06fa90..80c8ecfe9bb 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/network/element/CiscoNexusVSMElement.java +++ b/plugins/hypervisors/vmware/src/com/cloud/network/element/CiscoNexusVSMElement.java @@ -20,6 +20,7 @@ package com.cloud.network.element; import java.util.List; import java.util.Map; import java.util.ArrayList; +import java.util.Set; import javax.ejb.Local; @@ -132,7 +133,7 @@ public class CiscoNexusVSMElement extends CiscoNexusVSMDeviceManagerImpl impleme } @Override - public boolean verifyServicesCombination(List services) { + public boolean verifyServicesCombination(Set services) { return true; } diff --git a/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/element/ElasticLoadBalancerElement.java b/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/element/ElasticLoadBalancerElement.java index 1f9356f7ea8..8490534f613 100644 --- a/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/element/ElasticLoadBalancerElement.java +++ b/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/element/ElasticLoadBalancerElement.java @@ -182,7 +182,7 @@ public class ElasticLoadBalancerElement extends AdapterBase implements LoadBalan } @Override - public boolean verifyServicesCombination(List services) { + public boolean verifyServicesCombination(Set services) { return true; } diff --git a/plugins/network-elements/f5/src/com/cloud/network/element/F5ExternalLoadBalancerElement.java b/plugins/network-elements/f5/src/com/cloud/network/element/F5ExternalLoadBalancerElement.java index 1b1cbf7aec7..d685ad973c1 100644 --- a/plugins/network-elements/f5/src/com/cloud/network/element/F5ExternalLoadBalancerElement.java +++ b/plugins/network-elements/f5/src/com/cloud/network/element/F5ExternalLoadBalancerElement.java @@ -453,7 +453,7 @@ public class F5ExternalLoadBalancerElement extends ExternalLoadBalancerDeviceMan } @Override - public boolean verifyServicesCombination(List services) { + public boolean verifyServicesCombination(Set services) { return true; } diff --git a/plugins/network-elements/juniper-srx/src/com/cloud/network/element/JuniperSRXExternalFirewallElement.java b/plugins/network-elements/juniper-srx/src/com/cloud/network/element/JuniperSRXExternalFirewallElement.java index 0473291d15d..ad2516b3b44 100644 --- a/plugins/network-elements/juniper-srx/src/com/cloud/network/element/JuniperSRXExternalFirewallElement.java +++ b/plugins/network-elements/juniper-srx/src/com/cloud/network/element/JuniperSRXExternalFirewallElement.java @@ -528,7 +528,7 @@ public class JuniperSRXExternalFirewallElement extends ExternalFirewallDeviceMan } @Override - public boolean verifyServicesCombination(List services) { + public boolean verifyServicesCombination(Set services) { return true; } diff --git a/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java 
b/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java index 83fea00d91d..f7be3fab478 100644 --- a/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java +++ b/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java @@ -15,6 +15,7 @@ package com.cloud.network.element; import java.net.URI; import java.util.ArrayList; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -590,15 +591,19 @@ public class NetscalerElement extends ExternalLoadBalancerDeviceManagerImpl impl } @Override - public boolean verifyServicesCombination(List services) { - List netscalerServices = new ArrayList(); - netscalerServices.add(Service.Lb.getName()); - netscalerServices.add(Service.StaticNat.getName()); + public boolean verifyServicesCombination(Set services) { + Set netscalerServices = new HashSet(); + netscalerServices.add(Service.Lb); + netscalerServices.add(Service.StaticNat); // NetScaler can only act as Lb and Static Nat service provider if (services != null && !services.isEmpty() && !netscalerServices.containsAll(services)) { + String servicesList = ""; + for (Service service : services) { + servicesList += service.getName() + " "; + } s_logger.warn("NetScaler network element can only support LB and Static NAT services and service combination " - + services + " is not supported."); + + servicesList + " is not supported."); return false; } diff --git a/plugins/network-elements/nicira-nvp/src/com/cloud/network/element/NiciraNvpElement.java b/plugins/network-elements/nicira-nvp/src/com/cloud/network/element/NiciraNvpElement.java index 6cc670c7f78..bff69aa235b 100644 --- a/plugins/network-elements/nicira-nvp/src/com/cloud/network/element/NiciraNvpElement.java +++ b/plugins/network-elements/nicira-nvp/src/com/cloud/network/element/NiciraNvpElement.java @@ -38,6 +38,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.UUID; import javax.ejb.Local; @@ -297,7 +298,7 @@ public class NiciraNvpElement extends AdapterBase implements ConnectivityProvide } @Override - public boolean verifyServicesCombination(List services) { + public boolean verifyServicesCombination(Set services) { return true; } @@ -514,5 +515,5 @@ public class NiciraNvpElement extends AdapterBase implements ConnectivityProvide } return new DeleteHostAnswer(true); } - + } diff --git a/plugins/network-elements/ovs/src/com/cloud/network/element/OvsElement.java b/plugins/network-elements/ovs/src/com/cloud/network/element/OvsElement.java index 9d571ae92bf..bf785e6710c 100644 --- a/plugins/network-elements/ovs/src/com/cloud/network/element/OvsElement.java +++ b/plugins/network-elements/ovs/src/com/cloud/network/element/OvsElement.java @@ -18,6 +18,7 @@ package com.cloud.network.element; import java.util.List; import java.util.Map; +import java.util.Set; import javax.ejb.Local; @@ -130,7 +131,7 @@ public class OvsElement extends AdapterBase implements NetworkElement { } @Override - public boolean verifyServicesCombination(List services) { + public boolean verifyServicesCombination(Set services) { return true; } } diff --git a/python/lib/cloudutils/syscfg.py b/python/lib/cloudutils/syscfg.py index e8ddcf71069..731b4c71abf 100755 --- a/python/lib/cloudutils/syscfg.py +++ b/python/lib/cloudutils/syscfg.py @@ -73,7 +73,7 @@ class sysConfig(object): for service in self.services: if not 
service.configration(): - raise CloudInternalException() + raise CloudInternalException("Configuration failed for service %s" % service.serviceName) def restore(self): for service in self.services: diff --git a/server/src/com/cloud/api/ApiServer.java b/server/src/com/cloud/api/ApiServer.java index 7a99a487916..eb5e7705a5d 100755 --- a/server/src/com/cloud/api/ApiServer.java +++ b/server/src/com/cloud/api/ApiServer.java @@ -53,6 +53,7 @@ import javax.crypto.spec.SecretKeySpec; import javax.servlet.http.HttpServletResponse; import javax.servlet.http.HttpSession; +import org.apache.commons.codec.binary.Base64; import org.apache.http.ConnectionClosedException; import org.apache.http.HttpException; import org.apache.http.HttpRequest; @@ -112,7 +113,6 @@ import com.cloud.utils.component.PluggableService; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; -import com.cloud.utils.encoding.Base64; import com.cloud.utils.exception.CSExceptionErrorCode; import com.cloud.uuididentity.dao.IdentityDao; @@ -752,7 +752,7 @@ public class ApiServer implements HttpRequestHandler { mac.init(keySpec); mac.update(unsignedRequest.getBytes()); byte[] encryptedBytes = mac.doFinal(); - String computedSignature = Base64.encodeBytes(encryptedBytes); + String computedSignature = Base64.encodeBase64String(encryptedBytes); boolean equalSig = signature.equals(computedSignature); if (!equalSig) { s_logger.info("User signature: " + signature + " is not equaled to computed signature: " + computedSignature); @@ -844,7 +844,7 @@ public class ApiServer implements HttpRequestHandler { SecureRandom sesssionKeyRandom = new SecureRandom(); byte sessionKeyBytes[] = new byte[20]; sesssionKeyRandom.nextBytes(sessionKeyBytes); - String sessionKey = Base64.encodeBytes(sessionKeyBytes); + String sessionKey = Base64.encodeBase64String(sessionKeyBytes); session.setAttribute("sessionkey", sessionKey); return; diff --git a/server/src/com/cloud/configuration/Config.java b/server/src/com/cloud/configuration/Config.java index 4cbc035ee76..116af37e789 100755 --- a/server/src/com/cloud/configuration/Config.java +++ b/server/src/com/cloud/configuration/Config.java @@ -310,7 +310,7 @@ public enum Config { AgentLbEnable("Advanced", ManagementServer.class, Boolean.class, "agent.lb.enabled", "false", "If agent load balancing enabled in cluster setup", null), SubDomainNetworkAccess("Advanced", NetworkManager.class, Boolean.class, "allow.subdomain.network.access", "true", "Allow subdomains to use networks dedicated to their parent domain(s)", null), UseExternalDnsServers("Advanced", NetworkManager.class, Boolean.class, "use.external.dns", "false", "Bypass internal dns, use exetrnal dns1 and dns2", null), - EncodeApiResponse("Advanced", ManagementServer.class, Boolean.class, "encode.api.response", "false", "Do UTF-8 encoding for the api response, false by default", null), + EncodeApiResponse("Advanced", ManagementServer.class, Boolean.class, "encode.api.response", "false", "Do URL encoding for the api response, false by default", null), DnsBasicZoneUpdates("Advanced", NetworkManager.class, String.class, "network.dns.basiczone.updates", "all", "This parameter can take 2 values: all (default) and pod. 
It defines if DHCP/DNS requests have to be send to all dhcp servers in cloudstack, or only to the one in the same pod", "all,pod"), ClusterMessageTimeOutSeconds("Advanced", ManagementServer.class, Integer.class, "cluster.message.timeout.seconds", "300", "Time (in seconds) to wait before a inter-management server message post times out.", null), diff --git a/server/src/com/cloud/network/NetworkManagerImpl.java b/server/src/com/cloud/network/NetworkManagerImpl.java index 0ad1e8c250f..94c773882d5 100755 --- a/server/src/com/cloud/network/NetworkManagerImpl.java +++ b/server/src/com/cloud/network/NetworkManagerImpl.java @@ -6732,14 +6732,6 @@ public class NetworkManagerImpl implements NetworkManager, NetworkService, Manag l.add(service); } - for (String provider : providerSvcs.keySet()) { - NetworkElement element = getElementImplementingProvider(provider); - List services = providerSvcs.get(provider); - if (!element.verifyServicesCombination(services)) { - throw new UnsupportedServiceException("Provider " + provider + " doesn't support services combination: " + services); - } - } - return svcProviders; } @@ -6836,11 +6828,16 @@ public class NetworkManagerImpl implements NetworkManager, NetworkService, Manag + provider.getName()); } } + List serviceList = new ArrayList(); for (Service service : enabledServices) { // check if the service is provided by this Provider if (!element.getCapabilities().containsKey(service)) { throw new UnsupportedServiceException(provider.getName() + " Provider cannot provide service " + service.getName()); } + serviceList.add(service.getName()); + } + if (!element.verifyServicesCombination(enabledServices)) { + throw new UnsupportedServiceException("Provider " + provider.getName() + " doesn't support services combination: " + serviceList); } } } diff --git a/server/src/com/cloud/network/element/BareMetalElement.java b/server/src/com/cloud/network/element/BareMetalElement.java index fee411af5e1..842af833b6b 100644 --- a/server/src/com/cloud/network/element/BareMetalElement.java +++ b/server/src/com/cloud/network/element/BareMetalElement.java @@ -18,6 +18,7 @@ package com.cloud.network.element; import java.util.List; import java.util.Map; +import java.util.Set; import javax.ejb.Local; @@ -122,7 +123,7 @@ public class BareMetalElement extends AdapterBase implements NetworkElement { } @Override - public boolean verifyServicesCombination(List services) { + public boolean verifyServicesCombination(Set services) { return true; } } diff --git a/server/src/com/cloud/network/element/CloudZonesNetworkElement.java b/server/src/com/cloud/network/element/CloudZonesNetworkElement.java index b3fbeac2ea4..5f93cac5f13 100644 --- a/server/src/com/cloud/network/element/CloudZonesNetworkElement.java +++ b/server/src/com/cloud/network/element/CloudZonesNetworkElement.java @@ -19,6 +19,7 @@ package com.cloud.network.element; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import javax.ejb.Local; @@ -237,7 +238,7 @@ public class CloudZonesNetworkElement extends AdapterBase implements NetworkElem } @Override - public boolean verifyServicesCombination(List services) { + public boolean verifyServicesCombination(Set services) { return true; } diff --git a/server/src/com/cloud/network/element/ExternalDhcpElement.java b/server/src/com/cloud/network/element/ExternalDhcpElement.java index fd95bbf346c..896cd85cd01 100755 --- a/server/src/com/cloud/network/element/ExternalDhcpElement.java +++ b/server/src/com/cloud/network/element/ExternalDhcpElement.java 
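The same one-line signature change, List<String> to Set<Service>, repeats in the remaining elements below. For reference, a provider-side implementation of the new contract reduces to a subset check, mirroring the NetScaler change above and the virtual router change further down. The sketch below is illustrative only; it assumes Service resolves to com.cloud.network.Network.Service as it does elsewhere in this tree, and the class name is not part of the patch.

    import java.util.HashSet;
    import java.util.Set;

    import com.cloud.network.Network.Service;

    // A hypothetical provider that offers only Lb and StaticNat: a requested
    // combination is acceptable only when it is a subset of what it supports.
    public class SubsetCombinationExample {
        private static final Set<Service> SUPPORTED = new HashSet<Service>();
        static {
            SUPPORTED.add(Service.Lb);
            SUPPORTED.add(Service.StaticNat);
        }

        public static boolean verifyServicesCombination(Set<Service> services) {
            return services == null || services.isEmpty() || SUPPORTED.containsAll(services);
        }
    }

Passing an unordered Set rather than a List of names also lets callers such as NetworkManagerImpl hand over the enabled services directly, without string conversion.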
@@ -19,6 +19,7 @@ package com.cloud.network.element; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import javax.ejb.Local; @@ -146,7 +147,7 @@ public class ExternalDhcpElement extends AdapterBase implements NetworkElement, } @Override - public boolean verifyServicesCombination(List services) { + public boolean verifyServicesCombination(Set services) { return true; } } diff --git a/server/src/com/cloud/network/element/SecurityGroupElement.java b/server/src/com/cloud/network/element/SecurityGroupElement.java index 74bc54c1dd5..26b33c949e6 100644 --- a/server/src/com/cloud/network/element/SecurityGroupElement.java +++ b/server/src/com/cloud/network/element/SecurityGroupElement.java @@ -19,6 +19,7 @@ package com.cloud.network.element; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import javax.ejb.Local; @@ -105,7 +106,7 @@ public class SecurityGroupElement extends AdapterBase implements NetworkElement } @Override - public boolean verifyServicesCombination(List services) { + public boolean verifyServicesCombination(Set services) { return true; } } diff --git a/server/src/com/cloud/network/element/VirtualRouterElement.java b/server/src/com/cloud/network/element/VirtualRouterElement.java index 54543974f76..f3941b60996 100755 --- a/server/src/com/cloud/network/element/VirtualRouterElement.java +++ b/server/src/com/cloud/network/element/VirtualRouterElement.java @@ -864,11 +864,16 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl } @Override - public boolean verifyServicesCombination(List services) { - if (!services.contains("SourceNat")) { - if (services.contains("StaticNat") || services.contains("Firewall") || services.contains("Lb") || services.contains("PortForwarding") || - services.contains("Vpn")) { - s_logger.warn("Virtual router can't enable services " + services + " without source NAT service"); + public boolean verifyServicesCombination(Set services) { + if (!services.contains(Service.SourceNat)) { + if (services.contains(Service.StaticNat) || services.contains(Service.Firewall) || services.contains(Service.Lb) || + services.contains(Service.PortForwarding) || services.contains(Service.Vpn)) { + String servicesList = "["; + for (Service service : services) { + servicesList += service.getName() + " "; + } + servicesList += "]"; + s_logger.warn("Virtual router can't enable services " + servicesList + " without source NAT service"); return false; } } diff --git a/server/src/com/cloud/servlet/ConsoleProxyServlet.java b/server/src/com/cloud/servlet/ConsoleProxyServlet.java index 93a401cd125..a052ce286b1 100644 --- a/server/src/com/cloud/servlet/ConsoleProxyServlet.java +++ b/server/src/com/cloud/servlet/ConsoleProxyServlet.java @@ -58,61 +58,61 @@ import com.cloud.vm.VirtualMachineManager; * Authentication : /console?cmd=auth&vm=xxx&sid=xxx */ public class ConsoleProxyServlet extends HttpServlet { - private static final long serialVersionUID = -5515382620323808168L; - public static final Logger s_logger = Logger.getLogger(ConsoleProxyServlet.class.getName()); - private static final int DEFAULT_THUMBNAIL_WIDTH = 144; - private static final int DEFAULT_THUMBNAIL_HEIGHT = 110; - - private final static AccountManager _accountMgr = ComponentLocator.getLocator(ManagementServer.Name).getManager(AccountManager.class); - private final static VirtualMachineManager _vmMgr = ComponentLocator.getLocator(ManagementServer.Name).getManager(VirtualMachineManager.class); - private 
final static ManagementServer _ms = (ManagementServer)ComponentLocator.getComponent(ManagementServer.Name); - private final static IdentityService _identityService = (IdentityService)ComponentLocator.getLocator(ManagementServer.Name).getManager(IdentityService.class); - - @Override + private static final long serialVersionUID = -5515382620323808168L; + public static final Logger s_logger = Logger.getLogger(ConsoleProxyServlet.class.getName()); + private static final int DEFAULT_THUMBNAIL_WIDTH = 144; + private static final int DEFAULT_THUMBNAIL_HEIGHT = 110; + + private final static AccountManager _accountMgr = ComponentLocator.getLocator(ManagementServer.Name).getManager(AccountManager.class); + private final static VirtualMachineManager _vmMgr = ComponentLocator.getLocator(ManagementServer.Name).getManager(VirtualMachineManager.class); + private final static ManagementServer _ms = (ManagementServer)ComponentLocator.getComponent(ManagementServer.Name); + private final static IdentityService _identityService = ComponentLocator.getLocator(ManagementServer.Name).getManager(IdentityService.class); + + @Override protected void doPost(HttpServletRequest req, HttpServletResponse resp) { - doGet(req, resp); - } - - @Override + doGet(req, resp); + } + + @Override protected void doGet(HttpServletRequest req, HttpServletResponse resp) { - - try { - if(_accountMgr == null || _vmMgr == null || _ms == null) { - sendResponse(resp, "Service is not ready"); - return; - } - - if(_ms.getHashKey() == null) { - s_logger.debug("Console/thumbnail access denied. Ticket service is not ready yet"); - sendResponse(resp, "Service is not ready"); - return; - } - + + try { + if(_accountMgr == null || _vmMgr == null || _ms == null) { + sendResponse(resp, "Service is not ready"); + return; + } + + if(_ms.getHashKey() == null) { + s_logger.debug("Console/thumbnail access denied. Ticket service is not ready yet"); + sendResponse(resp, "Service is not ready"); + return; + } + String userId = null; String account = null; Account accountObj = null; - + Map params = new HashMap(); params.putAll(req.getParameterMap()); HttpSession session = req.getSession(false); if(session == null) { - if(verifyRequest(params)) { + if(verifyRequest(params)) { userId = (String)params.get("userid")[0]; account = (String)params.get("account")[0]; accountObj = (Account)params.get("accountobj")[0]; - } else { - s_logger.debug("Invalid web session or API key in request, reject console/thumbnail access"); - sendResponse(resp, "Access denied. Invalid web session or API key in request"); - return; - } + } else { + s_logger.debug("Invalid web session or API key in request, reject console/thumbnail access"); + sendResponse(resp, "Access denied. 
Invalid web session or API key in request"); + return; + } } else { - // adjust to latest API refactoring changes - if(session.getAttribute("userid") != null) { + // adjust to latest API refactoring changes + if(session.getAttribute("userid") != null) { userId = ((Long)session.getAttribute("userid")).toString(); } - - accountObj = (Account)session.getAttribute("accountobj"); + + accountObj = (Account)session.getAttribute("accountobj"); if(accountObj != null) { account = "" + accountObj.getId(); } @@ -120,381 +120,381 @@ public class ConsoleProxyServlet extends HttpServlet { // Do a sanity check here to make sure the user hasn't already been deleted if ((userId == null) || (account == null) || (accountObj == null) || !verifyUser(Long.valueOf(userId))) { - s_logger.debug("Invalid user/account, reject console/thumbnail access"); - sendResponse(resp, "Access denied. Invalid or inconsistent account is found"); - return; + s_logger.debug("Invalid user/account, reject console/thumbnail access"); + sendResponse(resp, "Access denied. Invalid or inconsistent account is found"); + return; } - String cmd = req.getParameter("cmd"); - if(cmd == null || !isValidCmd(cmd)) { - s_logger.debug("invalid console servlet command: " + cmd); - sendResponse(resp, ""); - return; - } + String cmd = req.getParameter("cmd"); + if(cmd == null || !isValidCmd(cmd)) { + s_logger.debug("invalid console servlet command: " + cmd); + sendResponse(resp, ""); + return; + } - String vmIdString = req.getParameter("vm"); - Long vmId = _identityService.getIdentityId("vm_instance", vmIdString); - if(vmId == null) { - s_logger.info("invalid console servlet command parameter: " + vmIdString); - sendResponse(resp, ""); - return; - } - - if(!checkSessionPermision(req, vmId, accountObj)) { - sendResponse(resp, "Permission denied"); - return; - } - - if(cmd.equalsIgnoreCase("thumbnail")) { + String vmIdString = req.getParameter("vm"); + Long vmId = _identityService.getIdentityId("vm_instance", vmIdString); + if(vmId == null) { + s_logger.info("invalid console servlet command parameter: " + vmIdString); + sendResponse(resp, ""); + return; + } + + if(!checkSessionPermision(req, vmId, accountObj)) { + sendResponse(resp, "Permission denied"); + return; + } + + if(cmd.equalsIgnoreCase("thumbnail")) { handleThumbnailRequest(req, resp, vmId); } else if(cmd.equalsIgnoreCase("access")) { handleAccessRequest(req, resp, vmId); } else { handleAuthRequest(req, resp, vmId); } - } catch (Throwable e) { - s_logger.error("Unexepected exception in ConsoleProxyServlet", e); - sendResponse(resp, "Server Internal Error"); - } - } - - private void handleThumbnailRequest(HttpServletRequest req, HttpServletResponse resp, long vmId) { - VMInstanceVO vm = _vmMgr.findById(vmId); - if(vm == null) { - s_logger.warn("VM " + vmId + " does not exist, sending blank response for thumbnail request"); - sendResponse(resp, ""); - return; - } - - if(vm.getHostId() == null) { - s_logger.warn("VM " + vmId + " lost host info, sending blank response for thumbnail request"); - sendResponse(resp, ""); - return; - } - - HostVO host = _ms.getHostBy(vm.getHostId()); - if(host == null) { - s_logger.warn("VM " + vmId + "'s host does not exist, sending blank response for thumbnail request"); - sendResponse(resp, ""); - return; - } - - String rootUrl = _ms.getConsoleAccessUrlRoot(vmId); - if(rootUrl == null) { - sendResponse(resp, ""); - return; - } - - int w = DEFAULT_THUMBNAIL_WIDTH; - int h = DEFAULT_THUMBNAIL_HEIGHT; - - String value = req.getParameter("w"); - try { - w = 
Integer.parseInt(value); - } catch(NumberFormatException e) { - } - - value = req.getParameter("h"); - try { - h = Integer.parseInt(value); - } catch(NumberFormatException e) { - } - - try { - resp.sendRedirect(composeThumbnailUrl(rootUrl, vm, host, w, h)); - } catch (IOException e) { - if(s_logger.isInfoEnabled()) { + } catch (Throwable e) { + s_logger.error("Unexepected exception in ConsoleProxyServlet", e); + sendResponse(resp, "Server Internal Error"); + } + } + + private void handleThumbnailRequest(HttpServletRequest req, HttpServletResponse resp, long vmId) { + VMInstanceVO vm = _vmMgr.findById(vmId); + if(vm == null) { + s_logger.warn("VM " + vmId + " does not exist, sending blank response for thumbnail request"); + sendResponse(resp, ""); + return; + } + + if(vm.getHostId() == null) { + s_logger.warn("VM " + vmId + " lost host info, sending blank response for thumbnail request"); + sendResponse(resp, ""); + return; + } + + HostVO host = _ms.getHostBy(vm.getHostId()); + if(host == null) { + s_logger.warn("VM " + vmId + "'s host does not exist, sending blank response for thumbnail request"); + sendResponse(resp, ""); + return; + } + + String rootUrl = _ms.getConsoleAccessUrlRoot(vmId); + if(rootUrl == null) { + sendResponse(resp, ""); + return; + } + + int w = DEFAULT_THUMBNAIL_WIDTH; + int h = DEFAULT_THUMBNAIL_HEIGHT; + + String value = req.getParameter("w"); + try { + w = Integer.parseInt(value); + } catch(NumberFormatException e) { + } + + value = req.getParameter("h"); + try { + h = Integer.parseInt(value); + } catch(NumberFormatException e) { + } + + try { + resp.sendRedirect(composeThumbnailUrl(rootUrl, vm, host, w, h)); + } catch (IOException e) { + if(s_logger.isInfoEnabled()) { s_logger.info("Client may already close the connection"); } - } - } - - private void handleAccessRequest(HttpServletRequest req, HttpServletResponse resp, long vmId) { - VMInstanceVO vm = _vmMgr.findById(vmId); - if(vm == null) { - s_logger.warn("VM " + vmId + " does not exist, sending blank response for console access request"); - sendResponse(resp, ""); - return; - } - - if(vm.getHostId() == null) { - s_logger.warn("VM " + vmId + " lost host info, sending blank response for console access request"); - sendResponse(resp, ""); - return; - } - - HostVO host = _ms.getHostBy(vm.getHostId()); - if(host == null) { - s_logger.warn("VM " + vmId + "'s host does not exist, sending blank response for console access request"); - sendResponse(resp, ""); - return; - } - - String rootUrl = _ms.getConsoleAccessUrlRoot(vmId); - if(rootUrl == null) { - sendResponse(resp, "
Console access will be ready in a few minutes. Please try it again later.
"); - return; - } - - String vmName = vm.getHostName(); - if(vm.getType() == VirtualMachine.Type.User) { - UserVm userVm = (UserVm)_vmMgr.findByIdAndType(VirtualMachine.Type.User, vmId); - String displayName = userVm.getDisplayName(); - if(displayName != null && !displayName.isEmpty() && !displayName.equals(vmName)) { - vmName += "(" + displayName + ")"; - } - } - - StringBuffer sb = new StringBuffer(); - sb.append("").append(escapeHTML(vmName)).append(""); - s_logger.debug("the console url is :: " + sb.toString()); - sendResponse(resp, sb.toString()); - } - - private void handleAuthRequest(HttpServletRequest req, HttpServletResponse resp, long vmId) { - - // TODO authentication channel between console proxy VM and management server needs to be secured, - // the data is now being sent through private network, but this is apparently not enough - VMInstanceVO vm = _vmMgr.findById(vmId); - if(vm == null) { - s_logger.warn("VM " + vmId + " does not exist, sending failed response for authentication request from console proxy"); - sendResponse(resp, "failed"); - return; - } - - if(vm.getHostId() == null) { - s_logger.warn("VM " + vmId + " lost host info, failed response for authentication request from console proxy"); - sendResponse(resp, "failed"); - return; - } - - HostVO host = _ms.getHostBy(vm.getHostId()); - if(host == null) { - s_logger.warn("VM " + vmId + "'s host does not exist, sending failed response for authentication request from console proxy"); - sendResponse(resp, "failed"); - return; - } - - String sid = req.getParameter("sid"); - if(sid == null || !sid.equals(vm.getVncPassword())) { - s_logger.warn("sid " + sid + " in url does not match stored sid " + vm.getVncPassword()); - sendResponse(resp, "failed"); - return; - } - - sendResponse(resp, "success"); - } - - // put the ugly stuff here - static public Ternary parseHostInfo(String hostInfo) { - String host = null; - String tunnelUrl = null; - String tunnelSession = null; - - s_logger.info("Parse host info returned from executing GetVNCPortCommand. host info: " + hostInfo); - - if(hostInfo != null && hostInfo.startsWith("consoleurl")) { - String tokens[] = hostInfo.split("&"); - - if(hostInfo.length() > 19 && hostInfo.indexOf('/', 19) > 19) { - host = hostInfo.substring(19, hostInfo.indexOf('/', 19)).trim(); - tunnelUrl = tokens[0].substring("consoleurl=".length()); - tunnelSession = tokens[1].split("=")[1]; - } else { - host = ""; - } - } else { - host = hostInfo; } - + } + + private void handleAccessRequest(HttpServletRequest req, HttpServletResponse resp, long vmId) { + VMInstanceVO vm = _vmMgr.findById(vmId); + if(vm == null) { + s_logger.warn("VM " + vmId + " does not exist, sending blank response for console access request"); + sendResponse(resp, ""); + return; + } + + if(vm.getHostId() == null) { + s_logger.warn("VM " + vmId + " lost host info, sending blank response for console access request"); + sendResponse(resp, ""); + return; + } + + HostVO host = _ms.getHostBy(vm.getHostId()); + if(host == null) { + s_logger.warn("VM " + vmId + "'s host does not exist, sending blank response for console access request"); + sendResponse(resp, ""); + return; + } + + String rootUrl = _ms.getConsoleAccessUrlRoot(vmId); + if(rootUrl == null) { + sendResponse(resp, "
Console access will be ready in a few minutes. Please try it again later.
"); + return; + } + + String vmName = vm.getHostName(); + if(vm.getType() == VirtualMachine.Type.User) { + UserVm userVm = (UserVm)_vmMgr.findByIdAndType(VirtualMachine.Type.User, vmId); + String displayName = userVm.getDisplayName(); + if(displayName != null && !displayName.isEmpty() && !displayName.equals(vmName)) { + vmName += "(" + displayName + ")"; + } + } + + StringBuffer sb = new StringBuffer(); + sb.append("").append(escapeHTML(vmName)).append(""); + s_logger.debug("the console url is :: " + sb.toString()); + sendResponse(resp, sb.toString()); + } + + private void handleAuthRequest(HttpServletRequest req, HttpServletResponse resp, long vmId) { + + // TODO authentication channel between console proxy VM and management server needs to be secured, + // the data is now being sent through private network, but this is apparently not enough + VMInstanceVO vm = _vmMgr.findById(vmId); + if(vm == null) { + s_logger.warn("VM " + vmId + " does not exist, sending failed response for authentication request from console proxy"); + sendResponse(resp, "failed"); + return; + } + + if(vm.getHostId() == null) { + s_logger.warn("VM " + vmId + " lost host info, failed response for authentication request from console proxy"); + sendResponse(resp, "failed"); + return; + } + + HostVO host = _ms.getHostBy(vm.getHostId()); + if(host == null) { + s_logger.warn("VM " + vmId + "'s host does not exist, sending failed response for authentication request from console proxy"); + sendResponse(resp, "failed"); + return; + } + + String sid = req.getParameter("sid"); + if(sid == null || !sid.equals(vm.getVncPassword())) { + s_logger.warn("sid " + sid + " in url does not match stored sid " + vm.getVncPassword()); + sendResponse(resp, "failed"); + return; + } + + sendResponse(resp, "success"); + } + + // put the ugly stuff here + static public Ternary parseHostInfo(String hostInfo) { + String host = null; + String tunnelUrl = null; + String tunnelSession = null; + + s_logger.info("Parse host info returned from executing GetVNCPortCommand. 
host info: " + hostInfo); + + if(hostInfo != null && hostInfo.startsWith("consoleurl")) { + String tokens[] = hostInfo.split("&"); + + if(hostInfo.length() > 19 && hostInfo.indexOf('/', 19) > 19) { + host = hostInfo.substring(19, hostInfo.indexOf('/', 19)).trim(); + tunnelUrl = tokens[0].substring("consoleurl=".length()); + tunnelSession = tokens[1].split("=")[1]; + } else { + host = ""; + } + } else { + host = hostInfo; + } + return new Ternary(host, tunnelUrl, tunnelSession); - } - - private String composeThumbnailUrl(String rootUrl, VMInstanceVO vm, HostVO hostVo, int w, int h) { - StringBuffer sb = new StringBuffer(rootUrl); + } - String host = hostVo.getPrivateIpAddress(); - - Pair portInfo = _ms.getVncPort(vm); - Ternary parsedHostInfo = parseHostInfo(portInfo.first()); - - String sid = vm.getVncPassword(); - String tag = String.valueOf(vm.getId()); - tag = _identityService.getIdentityUuid("vm_instance", tag); - String ticket = genAccessTicket(host, String.valueOf(portInfo.second()), sid, tag); + private String composeThumbnailUrl(String rootUrl, VMInstanceVO vm, HostVO hostVo, int w, int h) { + StringBuffer sb = new StringBuffer(rootUrl); - ConsoleProxyPasswordBasedEncryptor encryptor = new ConsoleProxyPasswordBasedEncryptor(_ms.getHashKey()); - ConsoleProxyClientParam param = new ConsoleProxyClientParam(); - param.setClientHostAddress(parsedHostInfo.first()); - param.setClientHostPort(portInfo.second()); - param.setClientHostPassword(sid); - param.setClientTag(tag); - param.setTicket(ticket); - if(parsedHostInfo.second() != null && parsedHostInfo.third() != null) { - param.setClientTunnelUrl(parsedHostInfo.second()); - param.setClientTunnelSession(parsedHostInfo.third()); - } - - sb.append("/ajax?token=" + encryptor.encryptObject(ConsoleProxyClientParam.class, param)); - sb.append("&w=").append(w).append("&h=").append(h); - - if(s_logger.isDebugEnabled()) { + String host = hostVo.getPrivateIpAddress(); + + Pair portInfo = _ms.getVncPort(vm); + Ternary parsedHostInfo = parseHostInfo(portInfo.first()); + + String sid = vm.getVncPassword(); + String tag = String.valueOf(vm.getId()); + tag = _identityService.getIdentityUuid("vm_instance", tag); + String ticket = genAccessTicket(host, String.valueOf(portInfo.second()), sid, tag); + + ConsoleProxyPasswordBasedEncryptor encryptor = new ConsoleProxyPasswordBasedEncryptor(_ms.getHashKey()); + ConsoleProxyClientParam param = new ConsoleProxyClientParam(); + param.setClientHostAddress(parsedHostInfo.first()); + param.setClientHostPort(portInfo.second()); + param.setClientHostPassword(sid); + param.setClientTag(tag); + param.setTicket(ticket); + if(parsedHostInfo.second() != null && parsedHostInfo.third() != null) { + param.setClientTunnelUrl(parsedHostInfo.second()); + param.setClientTunnelSession(parsedHostInfo.third()); + } + + sb.append("/ajax?token=" + encryptor.encryptObject(ConsoleProxyClientParam.class, param)); + sb.append("&w=").append(w).append("&h=").append(h); + + if(s_logger.isDebugEnabled()) { s_logger.debug("Compose thumbnail url: " + sb.toString()); } - return sb.toString(); - } - - private String composeConsoleAccessUrl(String rootUrl, VMInstanceVO vm, HostVO hostVo) { - StringBuffer sb = new StringBuffer(rootUrl); - String host = hostVo.getPrivateIpAddress(); - - Pair portInfo = _ms.getVncPort(vm); - if(s_logger.isDebugEnabled()) - s_logger.debug("Port info " + portInfo.first()); + return sb.toString(); + } - Ternary parsedHostInfo = parseHostInfo(portInfo.first()); + private String composeConsoleAccessUrl(String rootUrl, 
VMInstanceVO vm, HostVO hostVo) { + StringBuffer sb = new StringBuffer(rootUrl); + String host = hostVo.getPrivateIpAddress(); - String sid = vm.getVncPassword(); - String tag = String.valueOf(vm.getId()); - tag = _identityService.getIdentityUuid("vm_instance", tag); - String ticket = genAccessTicket(host, String.valueOf(portInfo.second()), sid, tag); - ConsoleProxyPasswordBasedEncryptor encryptor = new ConsoleProxyPasswordBasedEncryptor(_ms.getHashKey()); - ConsoleProxyClientParam param = new ConsoleProxyClientParam(); - param.setClientHostAddress(parsedHostInfo.first()); - param.setClientHostPort(portInfo.second()); - param.setClientHostPassword(sid); - param.setClientTag(tag); - param.setTicket(ticket); - if(parsedHostInfo.second() != null && parsedHostInfo.third() != null) { - param.setClientTunnelUrl(parsedHostInfo.second()); - param.setClientTunnelSession(parsedHostInfo.third()); - } - - sb.append("/ajax?token=" + encryptor.encryptObject(ConsoleProxyClientParam.class, param)); - - // for console access, we need guest OS type to help implement keyboard - long guestOs = vm.getGuestOSId(); - GuestOSVO guestOsVo = _ms.getGuestOs(guestOs); - if(guestOsVo.getCategoryId() == 6) - sb.append("&guest=windows"); - - if(s_logger.isDebugEnabled()) { + Pair portInfo = _ms.getVncPort(vm); + if(s_logger.isDebugEnabled()) + s_logger.debug("Port info " + portInfo.first()); + + Ternary parsedHostInfo = parseHostInfo(portInfo.first()); + + String sid = vm.getVncPassword(); + String tag = String.valueOf(vm.getId()); + tag = _identityService.getIdentityUuid("vm_instance", tag); + String ticket = genAccessTicket(host, String.valueOf(portInfo.second()), sid, tag); + ConsoleProxyPasswordBasedEncryptor encryptor = new ConsoleProxyPasswordBasedEncryptor(_ms.getHashKey()); + ConsoleProxyClientParam param = new ConsoleProxyClientParam(); + param.setClientHostAddress(parsedHostInfo.first()); + param.setClientHostPort(portInfo.second()); + param.setClientHostPassword(sid); + param.setClientTag(tag); + param.setTicket(ticket); + if(parsedHostInfo.second() != null && parsedHostInfo.third() != null) { + param.setClientTunnelUrl(parsedHostInfo.second()); + param.setClientTunnelSession(parsedHostInfo.third()); + } + + sb.append("/ajax?token=" + encryptor.encryptObject(ConsoleProxyClientParam.class, param)); + + // for console access, we need guest OS type to help implement keyboard + long guestOs = vm.getGuestOSId(); + GuestOSVO guestOsVo = _ms.getGuestOs(guestOs); + if(guestOsVo.getCategoryId() == 6) + sb.append("&guest=windows"); + + if(s_logger.isDebugEnabled()) { s_logger.debug("Compose console url: " + sb.toString()); } - return sb.toString(); - } - - public static String genAccessTicket(String host, String port, String sid, String tag) { - return genAccessTicket(host, port, sid, tag, new Date()); - } - - public static String genAccessTicket(String host, String port, String sid, String tag, Date normalizedHashTime) { - String params = "host=" + host + "&port=" + port + "&sid=" + sid + "&tag=" + tag; - - try { - Mac mac = Mac.getInstance("HmacSHA1"); - - long ts = normalizedHashTime.getTime(); - ts = ts/60000; // round up to 1 minute - String secretKey = _ms.getHashKey(); - - SecretKeySpec keySpec = new SecretKeySpec(secretKey.getBytes(), "HmacSHA1"); - mac.init(keySpec); - mac.update(params.getBytes()); - mac.update(String.valueOf(ts).getBytes()); - - byte[] encryptedBytes = mac.doFinal(); - - return Base64.encodeBase64URLSafeString(encryptedBytes); - } catch(Exception e) { - s_logger.error("Unexpected exception 
", e); - } - return ""; - } - - private void sendResponse(HttpServletResponse resp, String content) { - try { - resp.setContentType("text/html"); - resp.getWriter().print(content); - } catch(IOException e) { - if(s_logger.isInfoEnabled()) { + return sb.toString(); + } + + public static String genAccessTicket(String host, String port, String sid, String tag) { + return genAccessTicket(host, port, sid, tag, new Date()); + } + + public static String genAccessTicket(String host, String port, String sid, String tag, Date normalizedHashTime) { + String params = "host=" + host + "&port=" + port + "&sid=" + sid + "&tag=" + tag; + + try { + Mac mac = Mac.getInstance("HmacSHA1"); + + long ts = normalizedHashTime.getTime(); + ts = ts/60000; // round up to 1 minute + String secretKey = _ms.getHashKey(); + + SecretKeySpec keySpec = new SecretKeySpec(secretKey.getBytes(), "HmacSHA1"); + mac.init(keySpec); + mac.update(params.getBytes()); + mac.update(String.valueOf(ts).getBytes()); + + byte[] encryptedBytes = mac.doFinal(); + + return Base64.encodeBase64String(encryptedBytes); + } catch(Exception e) { + s_logger.error("Unexpected exception ", e); + } + return ""; + } + + private void sendResponse(HttpServletResponse resp, String content) { + try { + resp.setContentType("text/html"); + resp.getWriter().print(content); + } catch(IOException e) { + if(s_logger.isInfoEnabled()) { s_logger.info("Client may already close the connection"); } - } - } - - private boolean checkSessionPermision(HttpServletRequest req, long vmId, Account accountObj) { + } + } + + private boolean checkSessionPermision(HttpServletRequest req, long vmId, Account accountObj) { VMInstanceVO vm = _vmMgr.findById(vmId); if(vm == null) { - s_logger.debug("Console/thumbnail access denied. VM " + vmId + " does not exist in system any more"); - return false; + s_logger.debug("Console/thumbnail access denied. VM " + vmId + " does not exist in system any more"); + return false; } - + // root admin can access anything - if(accountObj.getType() == Account.ACCOUNT_TYPE_ADMIN) - return true; + if(accountObj.getType() == Account.ACCOUNT_TYPE_ADMIN) + return true; switch(vm.getType()) { case User : - try { - _accountMgr.checkAccess(accountObj, null, true, vm); - } catch (PermissionDeniedException ex) { - if (accountObj.getType() == Account.ACCOUNT_TYPE_NORMAL) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("VM access is denied. VM owner account " + vm.getAccountId() - + " does not match the account id in session " + accountObj.getId() + " and caller is a normal user"); - } - } else if(accountObj.getType() == Account.ACCOUNT_TYPE_DOMAIN_ADMIN || accountObj.getType() == Account.ACCOUNT_TYPE_READ_ONLY_ADMIN) { - if(s_logger.isDebugEnabled()) { - s_logger.debug("VM access is denied. VM owner account " + vm.getAccountId() - + " does not match the account id in session " + accountObj.getId() + " and the domain-admin caller does not manage the target domain"); - } - } - return false; - } - break; - + try { + _accountMgr.checkAccess(accountObj, null, true, vm); + } catch (PermissionDeniedException ex) { + if (accountObj.getType() == Account.ACCOUNT_TYPE_NORMAL) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("VM access is denied. 
VM owner account " + vm.getAccountId() + + " does not match the account id in session " + accountObj.getId() + " and caller is a normal user"); + } + } else if(accountObj.getType() == Account.ACCOUNT_TYPE_DOMAIN_ADMIN || accountObj.getType() == Account.ACCOUNT_TYPE_READ_ONLY_ADMIN) { + if(s_logger.isDebugEnabled()) { + s_logger.debug("VM access is denied. VM owner account " + vm.getAccountId() + + " does not match the account id in session " + accountObj.getId() + " and the domain-admin caller does not manage the target domain"); + } + } + return false; + } + break; + case DomainRouter: case ConsoleProxy : case SecondaryStorageVm: - return false; - - default : - s_logger.warn("Unrecoginized virtual machine type, deny access by default. type: " + vm.getType()); - return false; + return false; + + default : + s_logger.warn("Unrecoginized virtual machine type, deny access by default. type: " + vm.getType()); + return false; } - - return true; - } - - private boolean isValidCmd(String cmd) { - if(cmd.equalsIgnoreCase("thumbnail") || cmd.equalsIgnoreCase("access") || cmd.equalsIgnoreCase("auth")) { + + return true; + } + + private boolean isValidCmd(String cmd) { + if(cmd.equalsIgnoreCase("thumbnail") || cmd.equalsIgnoreCase("access") || cmd.equalsIgnoreCase("auth")) { return true; } - - return false; - } - - public boolean verifyUser(Long userId) { - // copy from ApiServer.java, a bit ugly here - User user = _accountMgr.getUserIncludingRemoved(userId); - Account account = null; - if (user != null) { - account = _accountMgr.getAccount(user.getAccountId()); - } - if ((user == null) || (user.getRemoved() != null) || !user.getState().equals(Account.State.enabled) - || (account == null) || !account.getState().equals(Account.State.enabled)) { - s_logger.warn("Deleted/Disabled/Locked user with id=" + userId + " attempting to access public API"); - return false; - } - return true; + return false; } - - // copied and modified from ApiServer.java. + + public boolean verifyUser(Long userId) { + // copy from ApiServer.java, a bit ugly here + User user = _accountMgr.getUserIncludingRemoved(userId); + Account account = null; + if (user != null) { + account = _accountMgr.getAccount(user.getAccountId()); + } + + if ((user == null) || (user.getRemoved() != null) || !user.getState().equals(Account.State.enabled) + || (account == null) || !account.getState().equals(Account.State.enabled)) { + s_logger.warn("Deleted/Disabled/Locked user with id=" + userId + " attempting to access public API"); + return false; + } + return true; + } + + // copied and modified from ApiServer.java. 
// TODO need to replace the whole servlet with a API command private boolean verifyRequest(Map requestParameters) { try { @@ -516,7 +516,7 @@ public class ConsoleProxyServlet extends HttpServlet { for (String paramName : parameterNames) { // parameters come as name/value pairs in the form String/String[] String paramValue = ((String[])requestParameters.get(paramName))[0]; - + if ("signature".equalsIgnoreCase(paramName)) { signature = paramValue; } else { @@ -531,7 +531,7 @@ public class ConsoleProxyServlet extends HttpServlet { } } } - + // if api/secret key are passed to the parameters if ((signature == null) || (apiKey == null)) { @@ -573,16 +573,16 @@ public class ConsoleProxyServlet extends HttpServlet { mac.init(keySpec); mac.update(unsignedRequest.getBytes()); byte[] encryptedBytes = mac.doFinal(); - String computedSignature = com.cloud.utils.encoding.Base64.encodeBytes(encryptedBytes); + String computedSignature = Base64.encodeBase64URLSafeString(encryptedBytes); boolean equalSig = signature.equals(computedSignature); if (!equalSig) { - s_logger.debug("User signature: " + signature + " is not equaled to computed signature: " + computedSignature); + s_logger.debug("User signature: " + signature + " is not equaled to computed signature: " + computedSignature); } - + if(equalSig) { - requestParameters.put("userid", new Object[] {String.valueOf(user.getId())}); - requestParameters.put("account", new Object[] {account.getAccountName()}); - requestParameters.put("accountobj", new Object[] { account }); + requestParameters.put("userid", new Object[] {String.valueOf(user.getId())}); + requestParameters.put("account", new Object[] {account.getAccountName()}); + requestParameters.put("accountobj", new Object[] { account }); } return equalSig; } catch (Exception ex) { @@ -590,23 +590,23 @@ public class ConsoleProxyServlet extends HttpServlet { } return false; } - + public static final String escapeHTML(String content){ if(content == null || content.isEmpty()) return content; - + StringBuffer sb = new StringBuffer(); for (int i = 0; i < content.length(); i++) { - char c = content.charAt(i); - switch (c) { - case '<': sb.append("<"); break; - case '>': sb.append(">"); break; - case '&': sb.append("&"); break; - case '"': sb.append("""); break; - case ' ': sb.append(" ");break; - default: sb.append(c); break; - } + char c = content.charAt(i); + switch (c) { + case '<': sb.append("<"); break; + case '>': sb.append(">"); break; + case '&': sb.append("&"); break; + case '"': sb.append("""); break; + case ' ': sb.append(" ");break; + default: sb.append(c); break; + } } return sb.toString(); - } + } } diff --git a/server/src/com/cloud/user/AccountManagerImpl.java b/server/src/com/cloud/user/AccountManagerImpl.java index 38153f30618..65314980652 100755 --- a/server/src/com/cloud/user/AccountManagerImpl.java +++ b/server/src/com/cloud/user/AccountManagerImpl.java @@ -256,6 +256,9 @@ public class AccountManagerImpl implements AccountManager, AccountService, Manag s_logger.error("Unable to find an user authenticator."); } + //initialize admin context + UserContext.setAdminContext(new UserContext(getSystemUser().getId(), getSystemAccount(), null, false)); + return true; } diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java index 82693e9c97e..7a70eb05e5c 100755 --- a/server/src/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/com/cloud/vm/UserVmManagerImpl.java @@ -31,6 +31,7 @@ import java.util.concurrent.TimeUnit; import javax.ejb.Local; import 
javax.naming.ConfigurationException; +import org.apache.commons.codec.binary.Base64; import org.apache.log4j.Logger; import com.cloud.acl.ControlledEntity.ACLType; @@ -1364,12 +1365,12 @@ public class UserVmManagerImpl implements UserVmManager, UserVmService, Manager throw new InvalidParameterValueException("Snapshot id=" + snapshotId + " is not in " + Snapshot.Status.BackedUp + " state yet and can't be used for template creation"); } -/* + /* // bug #11428. Operation not supported if vmware and snapshots parent volume = ROOT if(snapshot.getHypervisorType() == HypervisorType.VMware && snapshotVolume.getVolumeType() == Type.DATADISK){ throw new UnsupportedServiceException("operation not supported, snapshot with id " + snapshotId + " is created from Data Disk"); } -*/ + */ hyperType = snapshot.getHypervisorType(); } @@ -2527,7 +2528,7 @@ public class UserVmManagerImpl implements UserVmManager, UserVmService, Manager if (userData.length() >= 2 * MAX_USER_DATA_LENGTH_BYTES) { throw new InvalidParameterValueException("User data is too long"); } - decodedUserData = org.apache.commons.codec.binary.Base64.decodeBase64(userData.getBytes()); + decodedUserData = Base64.decodeBase64(userData.getBytes()); if (decodedUserData.length > MAX_USER_DATA_LENGTH_BYTES) { throw new InvalidParameterValueException("User data is too long"); } diff --git a/setup/.classpath b/setup/.classpath new file mode 100644 index 00000000000..ac37fb2e4bc --- /dev/null +++ b/setup/.classpath @@ -0,0 +1,5 @@ + + + + + diff --git a/setup/db/db/schema-303to40.sql b/setup/db/db/schema-303to40.sql index 39b52651a64..a44e64ea4c9 100644 --- a/setup/db/db/schema-303to40.sql +++ b/setup/db/db/schema-303to40.sql @@ -85,4 +85,4 @@ SET SQL_SAFE_UPDATES=0; UPDATE `cloud`.`hypervisor_capabilities` SET `max_data_volumes_limit`=13 WHERE `hypervisor_type`='XenServer' AND (`hypervisor_version`='6.0' OR `hypervisor_version`='6.0.2'); SET SQL_SAFE_UPDATES=1; INSERT INTO `cloud`.`configuration` (`category`, `instance`, `component`, `name`, `value`, `description`) VALUES ('Advanced', 'DEFAULT', 'management-server', 'event.purge.interval', '86400', 'The interval (in seconds) to wait before running the event purge thread'); - +UPDATE `cloud`.`configuration` SET description='Do URL encoding for the api response, false by default' WHERE name='encode.api.response'; diff --git a/test/src/com/cloud/sample/UserCloudAPIExecutor.java b/test/src/com/cloud/sample/UserCloudAPIExecutor.java index 0917409e6db..c1246d4f940 100644 --- a/test/src/com/cloud/sample/UserCloudAPIExecutor.java +++ b/test/src/com/cloud/sample/UserCloudAPIExecutor.java @@ -31,10 +31,9 @@ import org.apache.commons.httpclient.HttpClient; import org.apache.commons.httpclient.HttpMethod; import org.apache.commons.httpclient.methods.GetMethod; -import com.cloud.utils.encoding.Base64; /** -* + * * * * diff --git a/test/src/com/cloud/test/stress/StressTestDirectAttach.java b/test/src/com/cloud/test/stress/StressTestDirectAttach.java index a4afb804f5e..9f77d25fb6d 100644 --- a/test/src/com/cloud/test/stress/StressTestDirectAttach.java +++ b/test/src/com/cloud/test/stress/StressTestDirectAttach.java @@ -35,6 +35,7 @@ import javax.crypto.spec.SecretKeySpec; import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.DocumentBuilderFactory; +import org.apache.commons.codec.binary.Base64; import org.apache.commons.httpclient.HttpClient; import org.apache.commons.httpclient.HttpException; import org.apache.commons.httpclient.HttpMethod; @@ -46,8 +47,6 @@ import org.w3c.dom.Element; import 
org.w3c.dom.Node; import org.w3c.dom.NodeList; - -import com.cloud.utils.encoding.Base64; import com.cloud.utils.exception.CloudRuntimeException; import com.trilead.ssh2.ChannelCondition; import com.trilead.ssh2.Connection; @@ -55,28 +54,28 @@ import com.trilead.ssh2.SCPClient; import com.trilead.ssh2.Session; public class StressTestDirectAttach { - private static long sleepTime = 180000L; // default 0 - private static boolean cleanUp = true; - public static final Logger s_logger = Logger - .getLogger(TestClientWithAPI.class.getName()); - private static boolean repeat = true; - private static String[] users = null; - private static boolean internet = false; - private static ThreadLocal _linuxIP = new ThreadLocal(); - private static ThreadLocal _linuxVmId = new ThreadLocal(); - private static ThreadLocal _linuxVmId1 = new ThreadLocal(); - private static ThreadLocal _linuxPassword = new ThreadLocal(); - private static ThreadLocal _windowsIP = new ThreadLocal(); - private static ThreadLocal _secretKey = new ThreadLocal(); - private static ThreadLocal _apiKey = new ThreadLocal(); - private static ThreadLocal _userId = new ThreadLocal(); - private static ThreadLocal _account = new ThreadLocal(); - private static ThreadLocal _domainRouterId = new ThreadLocal(); - private static ThreadLocal _newVolume = new ThreadLocal(); - private static ThreadLocal _newVolume1 = new ThreadLocal(); - private static DocumentBuilderFactory factory = DocumentBuilderFactory - .newInstance(); - private static int usageIterator = 1; + private static long sleepTime = 180000L; // default 0 + private static boolean cleanUp = true; + public static final Logger s_logger = Logger + .getLogger(TestClientWithAPI.class.getName()); + private static boolean repeat = true; + private static String[] users = null; + private static boolean internet = false; + private static ThreadLocal _linuxIP = new ThreadLocal(); + private static ThreadLocal _linuxVmId = new ThreadLocal(); + private static ThreadLocal _linuxVmId1 = new ThreadLocal(); + private static ThreadLocal _linuxPassword = new ThreadLocal(); + private static ThreadLocal _windowsIP = new ThreadLocal(); + private static ThreadLocal _secretKey = new ThreadLocal(); + private static ThreadLocal _apiKey = new ThreadLocal(); + private static ThreadLocal _userId = new ThreadLocal(); + private static ThreadLocal _account = new ThreadLocal(); + private static ThreadLocal _domainRouterId = new ThreadLocal(); + private static ThreadLocal _newVolume = new ThreadLocal(); + private static ThreadLocal _newVolume1 = new ThreadLocal(); + private static DocumentBuilderFactory factory = DocumentBuilderFactory + .newInstance(); + private static int usageIterator = 1; private static int numThreads = 1; private static int wait = 5000; private static String accountName = null; @@ -85,363 +84,364 @@ public class StressTestDirectAttach { private static String diskOfferingId="11"; private static String diskOfferingId1="12"; - private static final int MAX_RETRY_LINUX = 10; - private static final int MAX_RETRY_WIN = 10; - + private static final int MAX_RETRY_LINUX = 10; + private static final int MAX_RETRY_WIN = 10; - public static void main(String[] args) { - String host = "http://localhost"; - String port = "8092"; - String devPort = "8080"; - String apiUrl = "/client/api"; - try { - // Parameters - List argsList = Arrays.asList(args); - Iterator iter = argsList.iterator(); - while (iter.hasNext()) { - String arg = iter.next(); - // host - if (arg.equals("-h")) { - host = "http://" + iter.next(); - } 
+ public static void main(String[] args) { + String host = "http://localhost"; + String port = "8092"; + String devPort = "8080"; + String apiUrl = "/client/api"; - if (arg.equals("-p")) { - port = iter.next(); - } - if (arg.equals("-dp")) { - devPort = iter.next(); - } + try { + // Parameters + List argsList = Arrays.asList(args); + Iterator iter = argsList.iterator(); + while (iter.hasNext()) { + String arg = iter.next(); + // host + if (arg.equals("-h")) { + host = "http://" + iter.next(); + } - if (arg.equals("-t")) { - numThreads = Integer.parseInt(iter.next()); - } + if (arg.equals("-p")) { + port = iter.next(); + } + if (arg.equals("-dp")) { + devPort = iter.next(); + } - if (arg.equals("-s")) { - sleepTime = Long.parseLong(iter.next()); - } - if (arg.equals("-a")) { - accountName = iter.next(); - } + if (arg.equals("-t")) { + numThreads = Integer.parseInt(iter.next()); + } - if (arg.equals("-c")) { - cleanUp = Boolean.parseBoolean(iter.next()); - if (!cleanUp) - sleepTime = 0L; // no need to wait if we don't ever - // cleanup - } + if (arg.equals("-s")) { + sleepTime = Long.parseLong(iter.next()); + } + if (arg.equals("-a")) { + accountName = iter.next(); + } - if (arg.equals("-r")) { - repeat = Boolean.parseBoolean(iter.next()); - } + if (arg.equals("-c")) { + cleanUp = Boolean.parseBoolean(iter.next()); + if (!cleanUp) + sleepTime = 0L; // no need to wait if we don't ever + // cleanup + } - if (arg.equals("-i")) { - internet = Boolean.parseBoolean(iter.next()); - } - - if (arg.equals("-w")) { - wait = Integer.parseInt(iter.next()); - } - - if (arg.equals("-z")) { - zoneId = iter.next(); - } - - if (arg.equals("-so")) { - serviceOfferingId = iter.next(); - } - - } + if (arg.equals("-r")) { + repeat = Boolean.parseBoolean(iter.next()); + } - final String server = host + ":" + port + "/"; - final String developerServer = host + ":" + devPort + apiUrl; - s_logger.info("Starting test against server: " + server + " with " - + numThreads + " thread(s)"); - if (cleanUp) - s_logger.info("Clean up is enabled, each test will wait " - + sleepTime + " ms before cleaning up"); + if (arg.equals("-i")) { + internet = Boolean.parseBoolean(iter.next()); + } + + if (arg.equals("-w")) { + wait = Integer.parseInt(iter.next()); + } + + if (arg.equals("-z")) { + zoneId = iter.next(); + } + + if (arg.equals("-so")) { + serviceOfferingId = iter.next(); + } + + } + + final String server = host + ":" + port + "/"; + final String developerServer = host + ":" + devPort + apiUrl; + s_logger.info("Starting test against server: " + server + " with " + + numThreads + " thread(s)"); + if (cleanUp) + s_logger.info("Clean up is enabled, each test will wait " + + sleepTime + " ms before cleaning up"); - for (int i = 0; i < numThreads; i++) { - new Thread(new Runnable() { - public void run() { - do { - String username = null; - try { - long now = System.currentTimeMillis(); - Random ran = new Random(); - username = Math.abs(ran.nextInt())+ "-user"; - NDC.push(username); - - s_logger.info("Starting test for the user " + username); - int response = executeDeployment(server, - developerServer, username); - boolean success = false; - String reason = null; - - - - - if (response == 200) { - success = true; - if (internet) { - s_logger - .info("Deploy successful...waiting 5 minute before SSH tests"); - Thread.sleep(300000L); // Wait 60 - // seconds so - // the windows VM - // can boot up and do a sys prep. 
- - s_logger.info("Begin Linux SSH test for account " + _account.get()); - reason = sshTest(_linuxIP.get(), _linuxPassword.get()); - + for (int i = 0; i < numThreads; i++) { + new Thread(new Runnable() { + @Override + public void run() { + do { + String username = null; + try { + long now = System.currentTimeMillis(); + Random ran = new Random(); + username = Math.abs(ran.nextInt())+ "-user"; + NDC.push(username); - if (reason == null) { - s_logger - .info("Linux SSH test successful for account " + _account.get()); - } - } - if (reason == null) { - if (internet) { - s_logger - .info("Windows SSH test successful for account " + _account.get()); - } else { - s_logger - .info("deploy test successful....now cleaning up"); - if (cleanUp) { - s_logger - .info("Waiting " - + sleepTime - + " ms before cleaning up vms"); - Thread.sleep(sleepTime); - } else { - success = true; - } - } + s_logger.info("Starting test for the user " + username); + int response = executeDeployment(server, + developerServer, username); + boolean success = false; + String reason = null; - if (usageIterator >= numThreads) { - int eventsAndBillingResponseCode = - executeEventsAndBilling(server, developerServer); - s_logger.info("events and usage records command finished with response code: " - + eventsAndBillingResponseCode); - usageIterator = 1; - - } - else { - s_logger.info("Skipping events and usage records for this user: usageIterator " + usageIterator+ " and number of Threads " + numThreads); - usageIterator++; - } - if ((users == null) && (accountName == null)) { - s_logger - .info("Sending cleanup command"); - int cleanupResponseCode = executeCleanup( - server, developerServer, username); - s_logger - .info("cleanup command finished with response code: " - + cleanupResponseCode); - success = (cleanupResponseCode == 200); - } else { - s_logger - .info("Sending stop DomR / destroy VM command"); - int stopResponseCode = executeStop( - server, developerServer, - username); - s_logger - .info("stop(destroy) command finished with response code: " - + stopResponseCode); - success = (stopResponseCode == 200); - } - - } else { - // Just stop but don't destroy the - // VMs/Routers - s_logger - .info("SSH test failed for account " + _account.get() + "with reason '" - + reason - + "', stopping VMs"); - int stopResponseCode = executeStop( - server, developerServer, - username); - s_logger - .info("stop command finished with response code: " - + stopResponseCode); - success = false; // since the SSH test - // failed, mark the - // whole test as - // failure - } - } else { - // Just stop but don't destroy the - // VMs/Routers - s_logger - .info("Deploy test failed with reason '" - + reason - + "', stopping VMs"); - int stopResponseCode = executeStop(server, - developerServer, username); - s_logger - .info("stop command finished with response code: " - + stopResponseCode); - success = false; // since the deploy test - // failed, mark the - // whole test as failure - } - if (success) { - s_logger - .info("***** Completed test for user : " - + username - + " in " - + ((System - .currentTimeMillis() - now) / 1000L) - + " seconds"); - - } else { - s_logger - .info("##### FAILED test for user : " - + username - + " in " - + ((System - .currentTimeMillis() - now) / 1000L) - + " seconds with reason : " - + reason); - } - s_logger.info("Sleeping for " + wait + " seconds before starting next iteration"); - Thread.sleep(wait); - } catch (Exception e) { - s_logger.warn("Error in thread", e); - try { - int stopResponseCode = 
executeStop(server, - developerServer, username); - s_logger.info("stop response code: " - + stopResponseCode); - } catch (Exception e1) { - } - } finally { - NDC.clear(); - } - } while (repeat); - } - }).start(); - } - } catch (Exception e) { - s_logger.error(e); - } - } - - public static Map> getMultipleValuesFromXML( - InputStream is, String[] tagNames) { - Map> returnValues = new HashMap>(); - try { - DocumentBuilder docBuilder = factory.newDocumentBuilder(); - Document doc = docBuilder.parse(is); - Element rootElement = doc.getDocumentElement(); - for (int i = 0; i < tagNames.length; i++) { - NodeList targetNodes = rootElement - .getElementsByTagName(tagNames[i]); - if (targetNodes.getLength() <= 0) { - s_logger.error("no " + tagNames[i] - + " tag in XML response...returning null"); - } else { - List valueList = new ArrayList(); - for (int j = 0; j < targetNodes.getLength(); j++) { - Node node = targetNodes.item(j); - valueList.add(node.getTextContent()); - } - returnValues.put(tagNames[i], valueList); - } - } - } catch (Exception ex) { - s_logger.error(ex); - } - return returnValues; - } - - public static Map getSingleValueFromXML(InputStream is, - String[] tagNames) { - Map returnValues = new HashMap(); - try { - DocumentBuilder docBuilder = factory.newDocumentBuilder(); - Document doc = docBuilder.parse(is); - Element rootElement = doc.getDocumentElement(); + if (response == 200) { + success = true; + if (internet) { + s_logger + .info("Deploy successful...waiting 5 minute before SSH tests"); + Thread.sleep(300000L); // Wait 60 + // seconds so + // the windows VM + // can boot up and do a sys prep. - for (int i = 0; i < tagNames.length; i++) { - NodeList targetNodes = rootElement - .getElementsByTagName(tagNames[i]); - if (targetNodes.getLength() <= 0) { - s_logger.error("no " + tagNames[i] - + " tag in XML response...returning null"); - } else { - returnValues.put(tagNames[i], targetNodes.item(0) - .getTextContent()); - } - } - } catch (Exception ex) { - s_logger.error("error processing XML", ex); - } - return returnValues; - } - - public static Map getSingleValueFromXML(Element rootElement, - String[] tagNames) { - Map returnValues = new HashMap(); - if (rootElement == null) { - s_logger.error("Root element is null, can't get single value from xml"); - return null; - } - try { - for (int i = 0; i < tagNames.length; i++) { - NodeList targetNodes = rootElement - .getElementsByTagName(tagNames[i]); - if (targetNodes.getLength() <= 0) { - s_logger.error("no " + tagNames[i] - + " tag in XML response...returning null"); - } else { - returnValues.put(tagNames[i], targetNodes.item(0) - .getTextContent()); - } - } - } catch (Exception ex) { - s_logger.error("error processing XML", ex); - } - return returnValues; - } - + s_logger.info("Begin Linux SSH test for account " + _account.get()); + reason = sshTest(_linuxIP.get(), _linuxPassword.get()); - private static List getNonSourceNatIPs(InputStream is) { - List returnValues = new ArrayList(); - try { - DocumentBuilder docBuilder = factory.newDocumentBuilder(); - Document doc = docBuilder.parse(is); - Element rootElement = doc.getDocumentElement(); - NodeList allocatedIpAddrNodes = rootElement - .getElementsByTagName("publicipaddress"); - for (int i = 0; i < allocatedIpAddrNodes.getLength(); i++) { - Node allocatedIpAddrNode = allocatedIpAddrNodes.item(i); - NodeList childNodes = allocatedIpAddrNode.getChildNodes(); - String ipAddress = null; - boolean isSourceNat = true; // assume it's source nat until we - // find otherwise - for (int j = 
0; j < childNodes.getLength(); j++) { - Node n = childNodes.item(j); - if ("ipaddress".equals(n.getNodeName())) { - ipAddress = n.getTextContent(); - } else if ("issourcenat".equals(n.getNodeName())) { - isSourceNat = Boolean.parseBoolean(n.getTextContent()); - } - } - if ((ipAddress != null) && !isSourceNat) { - returnValues.add(ipAddress); - } - } - } catch (Exception ex) { - s_logger.error(ex); - } - return returnValues; - } + + if (reason == null) { + s_logger + .info("Linux SSH test successful for account " + _account.get()); + } + } + if (reason == null) { + if (internet) { + s_logger + .info("Windows SSH test successful for account " + _account.get()); + } else { + s_logger + .info("deploy test successful....now cleaning up"); + if (cleanUp) { + s_logger + .info("Waiting " + + sleepTime + + " ms before cleaning up vms"); + Thread.sleep(sleepTime); + } else { + success = true; + } + } + + if (usageIterator >= numThreads) { + int eventsAndBillingResponseCode = + executeEventsAndBilling(server, developerServer); + s_logger.info("events and usage records command finished with response code: " + + eventsAndBillingResponseCode); + usageIterator = 1; + + } + else { + s_logger.info("Skipping events and usage records for this user: usageIterator " + usageIterator+ " and number of Threads " + numThreads); + usageIterator++; + } + + if ((users == null) && (accountName == null)) { + s_logger + .info("Sending cleanup command"); + int cleanupResponseCode = executeCleanup( + server, developerServer, username); + s_logger + .info("cleanup command finished with response code: " + + cleanupResponseCode); + success = (cleanupResponseCode == 200); + } else { + s_logger + .info("Sending stop DomR / destroy VM command"); + int stopResponseCode = executeStop( + server, developerServer, + username); + s_logger + .info("stop(destroy) command finished with response code: " + + stopResponseCode); + success = (stopResponseCode == 200); + } + + } else { + // Just stop but don't destroy the + // VMs/Routers + s_logger + .info("SSH test failed for account " + _account.get() + "with reason '" + + reason + + "', stopping VMs"); + int stopResponseCode = executeStop( + server, developerServer, + username); + s_logger + .info("stop command finished with response code: " + + stopResponseCode); + success = false; // since the SSH test + // failed, mark the + // whole test as + // failure + } + } else { + // Just stop but don't destroy the + // VMs/Routers + s_logger + .info("Deploy test failed with reason '" + + reason + + "', stopping VMs"); + int stopResponseCode = executeStop(server, + developerServer, username); + s_logger + .info("stop command finished with response code: " + + stopResponseCode); + success = false; // since the deploy test + // failed, mark the + // whole test as failure + } + + if (success) { + s_logger + .info("***** Completed test for user : " + + username + + " in " + + ((System + .currentTimeMillis() - now) / 1000L) + + " seconds"); + + } else { + s_logger + .info("##### FAILED test for user : " + + username + + " in " + + ((System + .currentTimeMillis() - now) / 1000L) + + " seconds with reason : " + + reason); + } + s_logger.info("Sleeping for " + wait + " seconds before starting next iteration"); + Thread.sleep(wait); + } catch (Exception e) { + s_logger.warn("Error in thread", e); + try { + int stopResponseCode = executeStop(server, + developerServer, username); + s_logger.info("stop response code: " + + stopResponseCode); + } catch (Exception e1) { + } + } finally { + NDC.clear(); + } + } 
while (repeat); + } + }).start(); + } + } catch (Exception e) { + s_logger.error(e); + } + } + + + public static Map> getMultipleValuesFromXML( + InputStream is, String[] tagNames) { + Map> returnValues = new HashMap>(); + try { + DocumentBuilder docBuilder = factory.newDocumentBuilder(); + Document doc = docBuilder.parse(is); + Element rootElement = doc.getDocumentElement(); + for (int i = 0; i < tagNames.length; i++) { + NodeList targetNodes = rootElement + .getElementsByTagName(tagNames[i]); + if (targetNodes.getLength() <= 0) { + s_logger.error("no " + tagNames[i] + + " tag in XML response...returning null"); + } else { + List valueList = new ArrayList(); + for (int j = 0; j < targetNodes.getLength(); j++) { + Node node = targetNodes.item(j); + valueList.add(node.getTextContent()); + } + returnValues.put(tagNames[i], valueList); + } + } + } catch (Exception ex) { + s_logger.error(ex); + } + return returnValues; + } + + public static Map getSingleValueFromXML(InputStream is, + String[] tagNames) { + Map returnValues = new HashMap(); + try { + DocumentBuilder docBuilder = factory.newDocumentBuilder(); + Document doc = docBuilder.parse(is); + Element rootElement = doc.getDocumentElement(); + + for (int i = 0; i < tagNames.length; i++) { + NodeList targetNodes = rootElement + .getElementsByTagName(tagNames[i]); + if (targetNodes.getLength() <= 0) { + s_logger.error("no " + tagNames[i] + + " tag in XML response...returning null"); + } else { + returnValues.put(tagNames[i], targetNodes.item(0) + .getTextContent()); + } + } + } catch (Exception ex) { + s_logger.error("error processing XML", ex); + } + return returnValues; + } + + public static Map getSingleValueFromXML(Element rootElement, + String[] tagNames) { + Map returnValues = new HashMap(); + if (rootElement == null) { + s_logger.error("Root element is null, can't get single value from xml"); + return null; + } + try { + for (int i = 0; i < tagNames.length; i++) { + NodeList targetNodes = rootElement + .getElementsByTagName(tagNames[i]); + if (targetNodes.getLength() <= 0) { + s_logger.error("no " + tagNames[i] + + " tag in XML response...returning null"); + } else { + returnValues.put(tagNames[i], targetNodes.item(0) + .getTextContent()); + } + } + } catch (Exception ex) { + s_logger.error("error processing XML", ex); + } + return returnValues; + } + + + private static List getNonSourceNatIPs(InputStream is) { + List returnValues = new ArrayList(); + try { + DocumentBuilder docBuilder = factory.newDocumentBuilder(); + Document doc = docBuilder.parse(is); + Element rootElement = doc.getDocumentElement(); + NodeList allocatedIpAddrNodes = rootElement + .getElementsByTagName("publicipaddress"); + for (int i = 0; i < allocatedIpAddrNodes.getLength(); i++) { + Node allocatedIpAddrNode = allocatedIpAddrNodes.item(i); + NodeList childNodes = allocatedIpAddrNode.getChildNodes(); + String ipAddress = null; + boolean isSourceNat = true; // assume it's source nat until we + // find otherwise + for (int j = 0; j < childNodes.getLength(); j++) { + Node n = childNodes.item(j); + if ("ipaddress".equals(n.getNodeName())) { + ipAddress = n.getTextContent(); + } else if ("issourcenat".equals(n.getNodeName())) { + isSourceNat = Boolean.parseBoolean(n.getTextContent()); + } + } + if ((ipAddress != null) && !isSourceNat) { + returnValues.add(ipAddress); + } + } + } catch (Exception ex) { + s_logger.error(ex); + } + return returnValues; + } private static List getSourceNatIPs(InputStream is) { List returnValues = new ArrayList(); @@ -475,519 +475,519 @@ 
public class StressTestDirectAttach { } private static String executeRegistration(String server, String username, - String password) throws HttpException, IOException { - String url = server + "?command=registerUserKeys&id=" + _userId.get().toString(); - s_logger.info("registering: " + username); - String returnValue = null; - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(url); - int responseCode = client.executeMethod(method); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map requestKeyValues = getSingleValueFromXML(is, - new String[] { "apikey", "secretkey" }); - _apiKey.set(requestKeyValues.get("apikey")); - returnValue = requestKeyValues.get("secretkey"); - } else { - s_logger.error("registration failed with error code: " + responseCode); - } - return returnValue; - } + String password) throws HttpException, IOException { + String url = server + "?command=registerUserKeys&id=" + _userId.get().toString(); + s_logger.info("registering: " + username); + String returnValue = null; + HttpClient client = new HttpClient(); + HttpMethod method = new GetMethod(url); + int responseCode = client.executeMethod(method); + if (responseCode == 200) { + InputStream is = method.getResponseBodyAsStream(); + Map requestKeyValues = getSingleValueFromXML(is, + new String[] { "apikey", "secretkey" }); + _apiKey.set(requestKeyValues.get("apikey")); + returnValue = requestKeyValues.get("secretkey"); + } else { + s_logger.error("registration failed with error code: " + responseCode); + } + return returnValue; + } - private static Integer executeDeployment(String server, String developerServer, - String username) throws HttpException, IOException { - // test steps: - // - create user - // - deploy Windows VM - // - deploy Linux VM - // - associate IP address - // - create two IP forwarding rules - // - create load balancer rule - // - list IP forwarding rules - // - list load balancer rules + private static Integer executeDeployment(String server, String developerServer, + String username) throws HttpException, IOException { + // test steps: + // - create user + // - deploy Windows VM + // - deploy Linux VM + // - associate IP address + // - create two IP forwarding rules + // - create load balancer rule + // - list IP forwarding rules + // - list load balancer rules - // ----------------------------- - // CREATE USER - // ----------------------------- - String encodedUsername = URLEncoder.encode(username, "UTF-8"); - String encryptedPassword = createMD5Password(username); - String encodedPassword = URLEncoder.encode(encryptedPassword, "UTF-8"); - - String url = server + "?command=createUser&username=" + encodedUsername - + "&password=" + encodedPassword - + "&firstname=Test&lastname=Test&email=test@vmops.com&domainId=1&accounttype=0"; - if (accountName != null ) { - url = server + "?command=createUser&username=" + encodedUsername - + "&password=" + encodedPassword - + "&firstname=Test&lastname=Test&email=test@vmops.com&domainId=1&accounttype=0&account=" + accountName; - } - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(url); - int responseCode = client.executeMethod(method); - long userId = -1; - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map userIdValues = getSingleValueFromXML(is, - new String[] { "id", "account" }); - String userIdStr = userIdValues.get("id"); - s_logger.info("created user " + username + " with id "+ userIdStr); - if (userIdStr != null) { - userId = 
Long.parseLong(userIdStr); - _userId.set(userId); - _account.set(userIdValues.get("account")); - if (userId == -1) { - s_logger - .error("create user (" + username + ") failed to retrieve a valid user id, aborting depolyment test"); - return -1; - } - } - } else { - s_logger.error("create user test failed for user " + username + " with error code :" + responseCode); - return responseCode; - } + // ----------------------------- + // CREATE USER + // ----------------------------- + String encodedUsername = URLEncoder.encode(username, "UTF-8"); + String encryptedPassword = createMD5Password(username); + String encodedPassword = URLEncoder.encode(encryptedPassword, "UTF-8"); - _secretKey.set(executeRegistration(server, username, username)); + String url = server + "?command=createUser&username=" + encodedUsername + + "&password=" + encodedPassword + + "&firstname=Test&lastname=Test&email=test@vmops.com&domainId=1&accounttype=0"; + if (accountName != null ) { + url = server + "?command=createUser&username=" + encodedUsername + + "&password=" + encodedPassword + + "&firstname=Test&lastname=Test&email=test@vmops.com&domainId=1&accounttype=0&account=" + accountName; + } + HttpClient client = new HttpClient(); + HttpMethod method = new GetMethod(url); + int responseCode = client.executeMethod(method); + long userId = -1; + if (responseCode == 200) { + InputStream is = method.getResponseBodyAsStream(); + Map userIdValues = getSingleValueFromXML(is, + new String[] { "id", "account" }); + String userIdStr = userIdValues.get("id"); + s_logger.info("created user " + username + " with id "+ userIdStr); + if (userIdStr != null) { + userId = Long.parseLong(userIdStr); + _userId.set(userId); + _account.set(userIdValues.get("account")); + if (userId == -1) { + s_logger + .error("create user (" + username + ") failed to retrieve a valid user id, aborting depolyment test"); + return -1; + } + } + } else { + s_logger.error("create user test failed for user " + username + " with error code :" + responseCode); + return responseCode; + } - if (_secretKey.get() == null) { - s_logger - .error("FAILED to retrieve secret key during registration, skipping user: " - + username); - return -1; - } else { - s_logger.info("got secret key: " + _secretKey.get()); - s_logger.info("got api key: " + _apiKey.get()); - } + _secretKey.set(executeRegistration(server, username, username)); + + if (_secretKey.get() == null) { + s_logger + .error("FAILED to retrieve secret key during registration, skipping user: " + + username); + return -1; + } else { + s_logger.info("got secret key: " + _secretKey.get()); + s_logger.info("got api key: " + _apiKey.get()); + } - // --------------------------------- - // CREATE NETWORK GROUP AND ADD INGRESS RULE TO IT - // --------------------------------- - String networkAccount = null; - if (accountName != null) { - networkAccount = accountName; - } - else { - networkAccount = encodedUsername; - } - String encodedApiKey = URLEncoder.encode(_apiKey.get(), "UTF-8"); - String requestToSign = "apikey=" + encodedApiKey - + "&command=createSecurityGroup&name=" + encodedUsername; - requestToSign = requestToSign.toLowerCase(); - String signature = signRequest(requestToSign, _secretKey.get()); - String encodedSignature = URLEncoder.encode(signature, "UTF-8"); - url = developerServer + "?command=createSecurityGroup&name=" + encodedUsername + "&apikey=" + encodedApiKey - + "&signature=" + encodedSignature; - method = new GetMethod(url); - responseCode = client.executeMethod(method); - if (responseCode == 200) { 
- InputStream is = method.getResponseBodyAsStream(); - Map values = getSingleValueFromXML(is, - new String[] { "id" }); - - if (values.get("id") == null) { - s_logger.info("Create network rule response code: 401"); - return 401; - } - else { - s_logger.info("Create security group response code: " + responseCode); - } - } else { - s_logger.error("Create security group failed with error code: " + responseCode + ". Following URL was sent: " + url); - return responseCode; - } - - - String encodedCidr = URLEncoder.encode("192.168.1.143/32", "UTF-8"); - url = server + "?command=authorizeSecurityGroupIngress&cidrlist=" + encodedCidr + "&endport=22&" + - "securitygroupname=" + encodedUsername +"&protocol=tcp&startport=22&account=" + networkAccount + "&domainid=1"; + // --------------------------------- + // CREATE NETWORK GROUP AND ADD INGRESS RULE TO IT + // --------------------------------- + String networkAccount = null; + if (accountName != null) { + networkAccount = accountName; + } + else { + networkAccount = encodedUsername; + } + String encodedApiKey = URLEncoder.encode(_apiKey.get(), "UTF-8"); + String requestToSign = "apikey=" + encodedApiKey + + "&command=createSecurityGroup&name=" + encodedUsername; + requestToSign = requestToSign.toLowerCase(); + String signature = signRequest(requestToSign, _secretKey.get()); + String encodedSignature = URLEncoder.encode(signature, "UTF-8"); + url = developerServer + "?command=createSecurityGroup&name=" + encodedUsername + "&apikey=" + encodedApiKey + + "&signature=" + encodedSignature; + method = new GetMethod(url); + responseCode = client.executeMethod(method); + if (responseCode == 200) { + InputStream is = method.getResponseBodyAsStream(); + Map values = getSingleValueFromXML(is, + new String[] { "id" }); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - Map values = getSingleValueFromXML(el, - new String[] { "id" }); - - if (values.get("id") == null) { - s_logger.info("Authorise security group ingress response code: 401"); - return 401; - } - else { - s_logger.info("Authorise security group ingress response code: " + responseCode); - } - } else { - s_logger.error("Authorise security group ingress failed with error code: " + responseCode + ". Following URL was sent: " + url); - return responseCode; - } - - + if (values.get("id") == null) { + s_logger.info("Create network rule response code: 401"); + return 401; + } + else { + s_logger.info("Create security group response code: " + responseCode); + } + } else { + s_logger.error("Create security group failed with error code: " + responseCode + ". 
Following URL was sent: " + url); + return responseCode; + } - // --------------------------------- - // DEPLOY LINUX VM - // --------------------------------- - { - long templateId = 2; - String encodedZoneId = URLEncoder.encode("" + zoneId, "UTF-8"); - String encodedServiceOfferingId = URLEncoder.encode("" - + serviceOfferingId, "UTF-8"); - String encodedTemplateId = URLEncoder.encode("" + templateId, - "UTF-8"); - encodedApiKey = URLEncoder.encode(_apiKey.get(), "UTF-8"); - requestToSign = "apikey=" + encodedApiKey - + "&command=deployVirtualMachine&securitygrouplist=" + encodedUsername + "&serviceofferingid=" - + encodedServiceOfferingId + "&templateid=" - + encodedTemplateId + "&zoneid=" + encodedZoneId; - requestToSign = requestToSign.toLowerCase(); - signature = signRequest(requestToSign, _secretKey.get()); - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - url = developerServer + "?command=deployVirtualMachine&securitygrouplist=" + encodedUsername + "&zoneid=" + encodedZoneId + "&serviceofferingid=" - + encodedServiceOfferingId + "&templateid=" - + encodedTemplateId + "&apikey=" + encodedApiKey - + "&signature=" + encodedSignature; - method = new GetMethod(url); - responseCode = client.executeMethod(method); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - Map values = getSingleValueFromXML(el, - new String[] { "id", "ipaddress" }); - - if ((values.get("ipaddress") == null) || (values - .get("id") == null)) { - s_logger.info("deploy linux vm response code: 401"); - return 401; - } - else { - s_logger.info("deploy linux vm response code: " + responseCode); - long linuxVMId = Long.parseLong(values.get("id")); - s_logger.info("got linux virtual machine id: " + linuxVMId); - _linuxVmId.set(values.get("id")); - _linuxIP.set(values.get("ipaddress")); - _linuxPassword.set("rs-ccb35ea5"); - } - } else { - s_logger.error("deploy linux vm failed with error code: " + responseCode + ". Following URL was sent: " + url); - return responseCode; - } - } - - - - //Create a new volume - { - url = server + "?command=createVolume&diskofferingid=" + diskOfferingId + "&zoneid=" + zoneId + "&name=newvolume&account=" + _account.get() + "&domainid=1"; - s_logger.info("Creating volume...."); - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - Map values = getSingleValueFromXML(el, - new String[] { "id" }); - - if (values.get("id") == null) { - s_logger.info("create volume response code: 401"); - return 401; - } - else { - s_logger.info("create volume response code: " + responseCode); - String volumeId = values.get("id"); - s_logger.info("got volume id: " + volumeId); - _newVolume.set(volumeId); - } - } else { - s_logger.error("create volume failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - } - - //attach a new volume to the vm - { - url = server + "?command=attachVolume&id=" + _newVolume.get() + "&virtualmachineid=" + _linuxVmId.get(); - s_logger.info("Attaching volume with id " + _newVolume.get() + " to the vm " + _linuxVmId.get()); - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - s_logger.info("Attach data volume response code: " + responseCode); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - Map values = getSingleValueFromXML(el, - new String[] { "id" }); - - if (values.get("id") == null) { - s_logger.info("Attach volume response code: 401"); - return 401; - } - else { - s_logger.info("Attach volume response code: " + responseCode); - } - } else { - s_logger.error("Attach volume failed with error code: " + responseCode + ". Following URL was sent: " + url); - return responseCode; - } - } - - //DEPLOY SECOND VM, ADD VOLUME TO IT - - // --------------------------------- - // DEPLOY another linux vm - // --------------------------------- - { - long templateId = 2; - String encodedZoneId = URLEncoder.encode("" + zoneId, "UTF-8"); - String encodedServiceOfferingId = URLEncoder.encode("" - + serviceOfferingId, "UTF-8"); - String encodedTemplateId = URLEncoder.encode("" + templateId, - "UTF-8"); - encodedApiKey = URLEncoder.encode(_apiKey.get(), "UTF-8"); - requestToSign = "apikey=" + encodedApiKey - + "&command=deployVirtualMachine&securitygrouplist=" + encodedUsername + "&serviceofferingid=" - + encodedServiceOfferingId + "&templateid=" - + encodedTemplateId + "&zoneid=" + encodedZoneId; - requestToSign = requestToSign.toLowerCase(); - signature = signRequest(requestToSign, _secretKey.get()); - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - url = developerServer + "?command=deployVirtualMachine&securitygrouplist=" + encodedUsername + "&zoneid=" + encodedZoneId + "&serviceofferingid=" - + encodedServiceOfferingId + "&templateid=" - + encodedTemplateId + "&apikey=" + encodedApiKey - + "&signature=" + encodedSignature; + String encodedCidr = URLEncoder.encode("192.168.1.143/32", "UTF-8"); + url = server + "?command=authorizeSecurityGroupIngress&cidrlist=" + encodedCidr + "&endport=22&" + + "securitygroupname=" + encodedUsername +"&protocol=tcp&startport=22&account=" + networkAccount + "&domainid=1"; - method = new GetMethod(url); - responseCode = client.executeMethod(method); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - Map values = getSingleValueFromXML(el, - new String[] { "id", "ipaddress" }); - - if ((values.get("ipaddress") == null) || (values - .get("id") == null)) { - s_logger.info("deploy linux vm response code: 401"); - return 401; - } - else { - s_logger.info("deploy linux vm response code: " + responseCode); - long linuxVMId = Long.parseLong(values.get("id")); - s_logger.info("got linux virtual machine id: " + linuxVMId); - _linuxVmId1.set(values.get("id")); - } - } else { - s_logger.error("deploy linux vm failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - } - - - - //Create a new volume - { - url = server + "?command=createVolume&diskofferingid=" + diskOfferingId1 + "&zoneid=" + zoneId + "&name=newvolume1&account=" + _account.get() + "&domainid=1"; - s_logger.info("Creating volume...."); - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - Map values = getSingleValueFromXML(el, - new String[] { "id" }); - - if (values.get("id") == null) { - s_logger.info("create volume response code: 401"); - return 401; - } - else { - s_logger.info("create volume response code: " + responseCode); - String volumeId = values.get("id"); - s_logger.info("got volume id: " + volumeId); - _newVolume1.set(volumeId); - } - } else { - s_logger.error("create volume failed with error code: " + responseCode + ". Following URL was sent: " + url); - return responseCode; - } - } - - //attach a new volume to the vm - { - url = server + "?command=attachVolume&id=" + _newVolume1.get() + "&virtualmachineid=" + _linuxVmId1.get(); - s_logger.info("Attaching volume with id " + _newVolume1.get() + " to the vm " + _linuxVmId1.get()); - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - s_logger.info("Attach data volume response code: " + responseCode); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - Map values = getSingleValueFromXML(el, - new String[] { "id" }); - - if (values.get("id") == null) { - s_logger.info("Attach volume response code: 401"); - return 401; - } - else { - s_logger.info("Attach volume response code: " + responseCode); - } - } else { - s_logger.error("Attach volume failed with error code: " + responseCode + ". Following URL was sent: " + url); - return responseCode; - } - } - return 200; - } + method = new GetMethod(url); + responseCode = client.executeMethod(method); + if (responseCode == 200) { + InputStream input = method.getResponseBodyAsStream(); + Element el = queryAsyncJobResult(server, input); + Map values = getSingleValueFromXML(el, + new String[] { "id" }); + + if (values.get("id") == null) { + s_logger.info("Authorise security group ingress response code: 401"); + return 401; + } + else { + s_logger.info("Authorise security group ingress response code: " + responseCode); + } + } else { + s_logger.error("Authorise security group ingress failed with error code: " + responseCode + ". 
Following URL was sent: " + url); + return responseCode; + } + + + + // --------------------------------- + // DEPLOY LINUX VM + // --------------------------------- + { + long templateId = 2; + String encodedZoneId = URLEncoder.encode("" + zoneId, "UTF-8"); + String encodedServiceOfferingId = URLEncoder.encode("" + + serviceOfferingId, "UTF-8"); + String encodedTemplateId = URLEncoder.encode("" + templateId, + "UTF-8"); + encodedApiKey = URLEncoder.encode(_apiKey.get(), "UTF-8"); + requestToSign = "apikey=" + encodedApiKey + + "&command=deployVirtualMachine&securitygrouplist=" + encodedUsername + "&serviceofferingid=" + + encodedServiceOfferingId + "&templateid=" + + encodedTemplateId + "&zoneid=" + encodedZoneId; + requestToSign = requestToSign.toLowerCase(); + signature = signRequest(requestToSign, _secretKey.get()); + encodedSignature = URLEncoder.encode(signature, "UTF-8"); + url = developerServer + "?command=deployVirtualMachine&securitygrouplist=" + encodedUsername + "&zoneid=" + encodedZoneId + "&serviceofferingid=" + + encodedServiceOfferingId + "&templateid=" + + encodedTemplateId + "&apikey=" + encodedApiKey + + "&signature=" + encodedSignature; + + method = new GetMethod(url); + responseCode = client.executeMethod(method); + if (responseCode == 200) { + InputStream input = method.getResponseBodyAsStream(); + Element el = queryAsyncJobResult(server, input); + Map values = getSingleValueFromXML(el, + new String[] { "id", "ipaddress" }); + + if ((values.get("ipaddress") == null) || (values + .get("id") == null)) { + s_logger.info("deploy linux vm response code: 401"); + return 401; + } + else { + s_logger.info("deploy linux vm response code: " + responseCode); + long linuxVMId = Long.parseLong(values.get("id")); + s_logger.info("got linux virtual machine id: " + linuxVMId); + _linuxVmId.set(values.get("id")); + _linuxIP.set(values.get("ipaddress")); + _linuxPassword.set("rs-ccb35ea5"); + } + } else { + s_logger.error("deploy linux vm failed with error code: " + responseCode + ". Following URL was sent: " + url); + return responseCode; + } + } + + + + //Create a new volume + { + url = server + "?command=createVolume&diskofferingid=" + diskOfferingId + "&zoneid=" + zoneId + "&name=newvolume&account=" + _account.get() + "&domainid=1"; + s_logger.info("Creating volume...."); + client = new HttpClient(); + method = new GetMethod(url); + responseCode = client.executeMethod(method); + if (responseCode == 200) { + InputStream input = method.getResponseBodyAsStream(); + Element el = queryAsyncJobResult(server, input); + Map values = getSingleValueFromXML(el, + new String[] { "id" }); + + if (values.get("id") == null) { + s_logger.info("create volume response code: 401"); + return 401; + } + else { + s_logger.info("create volume response code: " + responseCode); + String volumeId = values.get("id"); + s_logger.info("got volume id: " + volumeId); + _newVolume.set(volumeId); + } + } else { + s_logger.error("create volume failed with error code: " + responseCode + ". 
Following URL was sent: " + url); + return responseCode; + } + } + + //attach a new volume to the vm + { + url = server + "?command=attachVolume&id=" + _newVolume.get() + "&virtualmachineid=" + _linuxVmId.get(); + s_logger.info("Attaching volume with id " + _newVolume.get() + " to the vm " + _linuxVmId.get()); + client = new HttpClient(); + method = new GetMethod(url); + responseCode = client.executeMethod(method); + s_logger.info("Attach data volume response code: " + responseCode); + if (responseCode == 200) { + InputStream input = method.getResponseBodyAsStream(); + Element el = queryAsyncJobResult(server, input); + Map values = getSingleValueFromXML(el, + new String[] { "id" }); + + if (values.get("id") == null) { + s_logger.info("Attach volume response code: 401"); + return 401; + } + else { + s_logger.info("Attach volume response code: " + responseCode); + } + } else { + s_logger.error("Attach volume failed with error code: " + responseCode + ". Following URL was sent: " + url); + return responseCode; + } + } + + //DEPLOY SECOND VM, ADD VOLUME TO IT + + // --------------------------------- + // DEPLOY another linux vm + // --------------------------------- + { + long templateId = 2; + String encodedZoneId = URLEncoder.encode("" + zoneId, "UTF-8"); + String encodedServiceOfferingId = URLEncoder.encode("" + + serviceOfferingId, "UTF-8"); + String encodedTemplateId = URLEncoder.encode("" + templateId, + "UTF-8"); + encodedApiKey = URLEncoder.encode(_apiKey.get(), "UTF-8"); + requestToSign = "apikey=" + encodedApiKey + + "&command=deployVirtualMachine&securitygrouplist=" + encodedUsername + "&serviceofferingid=" + + encodedServiceOfferingId + "&templateid=" + + encodedTemplateId + "&zoneid=" + encodedZoneId; + requestToSign = requestToSign.toLowerCase(); + signature = signRequest(requestToSign, _secretKey.get()); + encodedSignature = URLEncoder.encode(signature, "UTF-8"); + url = developerServer + "?command=deployVirtualMachine&securitygrouplist=" + encodedUsername + "&zoneid=" + encodedZoneId + "&serviceofferingid=" + + encodedServiceOfferingId + "&templateid=" + + encodedTemplateId + "&apikey=" + encodedApiKey + + "&signature=" + encodedSignature; + + method = new GetMethod(url); + responseCode = client.executeMethod(method); + if (responseCode == 200) { + InputStream input = method.getResponseBodyAsStream(); + Element el = queryAsyncJobResult(server, input); + Map values = getSingleValueFromXML(el, + new String[] { "id", "ipaddress" }); + + if ((values.get("ipaddress") == null) || (values + .get("id") == null)) { + s_logger.info("deploy linux vm response code: 401"); + return 401; + } + else { + s_logger.info("deploy linux vm response code: " + responseCode); + long linuxVMId = Long.parseLong(values.get("id")); + s_logger.info("got linux virtual machine id: " + linuxVMId); + _linuxVmId1.set(values.get("id")); + } + } else { + s_logger.error("deploy linux vm failed with error code: " + responseCode + ". 
Following URL was sent: " + url); + return responseCode; + } + } + + + + //Create a new volume + { + url = server + "?command=createVolume&diskofferingid=" + diskOfferingId1 + "&zoneid=" + zoneId + "&name=newvolume1&account=" + _account.get() + "&domainid=1"; + s_logger.info("Creating volume...."); + client = new HttpClient(); + method = new GetMethod(url); + responseCode = client.executeMethod(method); + if (responseCode == 200) { + InputStream input = method.getResponseBodyAsStream(); + Element el = queryAsyncJobResult(server, input); + Map values = getSingleValueFromXML(el, + new String[] { "id" }); + + if (values.get("id") == null) { + s_logger.info("create volume response code: 401"); + return 401; + } + else { + s_logger.info("create volume response code: " + responseCode); + String volumeId = values.get("id"); + s_logger.info("got volume id: " + volumeId); + _newVolume1.set(volumeId); + } + } else { + s_logger.error("create volume failed with error code: " + responseCode + ". Following URL was sent: " + url); + return responseCode; + } + } + + //attach a new volume to the vm + { + url = server + "?command=attachVolume&id=" + _newVolume1.get() + "&virtualmachineid=" + _linuxVmId1.get(); + s_logger.info("Attaching volume with id " + _newVolume1.get() + " to the vm " + _linuxVmId1.get()); + client = new HttpClient(); + method = new GetMethod(url); + responseCode = client.executeMethod(method); + s_logger.info("Attach data volume response code: " + responseCode); + if (responseCode == 200) { + InputStream input = method.getResponseBodyAsStream(); + Element el = queryAsyncJobResult(server, input); + Map values = getSingleValueFromXML(el, + new String[] { "id" }); + + if (values.get("id") == null) { + s_logger.info("Attach volume response code: 401"); + return 401; + } + else { + s_logger.info("Attach volume response code: " + responseCode); + } + } else { + s_logger.error("Attach volume failed with error code: " + responseCode + ". Following URL was sent: " + url); + return responseCode; + } + } + return 200; + } - private static int executeCleanup(String server, String developerServer, String username) - throws HttpException, IOException { - // test steps: - // - get user - // - delete user + private static int executeCleanup(String server, String developerServer, String username) + throws HttpException, IOException { + // test steps: + // - get user + // - delete user - // ----------------------------- - // GET USER - // ----------------------------- - String userId = _userId.get().toString(); - String encodedUserId = URLEncoder.encode(userId, "UTF-8"); - String url = server + "?command=listUsers&id=" + encodedUserId; - s_logger.info("Cleaning up resources for user: " + userId + " with url " + url); - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(url); - int responseCode = client.executeMethod(method); - s_logger.info("get user response code: " + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map userInfo = getSingleValueFromXML(is, - new String[] { "username", "id", "account" }); - if (!username.equals(userInfo.get("username"))) { - s_logger - .error("get user failed to retrieve requested user, aborting cleanup test" + ". Following URL was sent: " + url); - return -1; - } - - } else { - s_logger.error("get user failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } + // ----------------------------- + // GET USER + // ----------------------------- + String userId = _userId.get().toString(); + String encodedUserId = URLEncoder.encode(userId, "UTF-8"); + String url = server + "?command=listUsers&id=" + encodedUserId; + s_logger.info("Cleaning up resources for user: " + userId + " with url " + url); + HttpClient client = new HttpClient(); + HttpMethod method = new GetMethod(url); + int responseCode = client.executeMethod(method); + s_logger.info("get user response code: " + responseCode); + if (responseCode == 200) { + InputStream is = method.getResponseBodyAsStream(); + Map userInfo = getSingleValueFromXML(is, + new String[] { "username", "id", "account" }); + if (!username.equals(userInfo.get("username"))) { + s_logger + .error("get user failed to retrieve requested user, aborting cleanup test" + ". Following URL was sent: " + url); + return -1; + } - // ----------------------------- - // UPDATE USER - // ----------------------------- - { - url = server + "?command=updateUser&id=" + userId - + "&firstname=delete&lastname=me"; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - s_logger.info("update user response code: " + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map success = getSingleValueFromXML(is, - new String[] { "success" }); - s_logger - .info("update user..success? " + success.get("success")); - } else { - s_logger.error("update user failed with error code: " + responseCode + ". Following URL was sent: " + url); - return responseCode; - } - } - - // ----------------------------- - // Execute reboot/stop/start commands for the VMs before deleting the account - made to exercise xen - // ----------------------------- - - //Reboot centos VM - String encodedApiKey = URLEncoder.encode(_apiKey.get(), "UTF-8"); - String requestToSign = "apikey=" + encodedApiKey + "&command=rebootVirtualMachine&id=" + _linuxVmId.get(); - requestToSign = requestToSign.toLowerCase(); - String signature = signRequest(requestToSign, _secretKey.get()); - String encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - url = developerServer + "?command=rebootVirtualMachine&id=" + _linuxVmId.get() + "&apikey=" - + encodedApiKey + "&signature=" + encodedSignature; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - s_logger.info("Reboot VM response code: " - + responseCode); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - Map success = getSingleValueFromXML(el, - new String[] { "success" }); - s_logger.info("VM was rebooted with the status: " - + success.get("success")); - } else { - s_logger.error(" VM test failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - - //Stop centos VM - requestToSign = "apikey=" + encodedApiKey + "&command=stopVirtualMachine&id=" + _linuxVmId.get(); - requestToSign = requestToSign.toLowerCase(); - signature = signRequest(requestToSign, _secretKey.get()); - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - url = developerServer + "?command=stopVirtualMachine&id=" + _linuxVmId.get() + "&apikey=" - + encodedApiKey + "&signature=" + encodedSignature; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - s_logger.info("Stop VM response code: " - + responseCode); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - Map success = getSingleValueFromXML(el, - new String[] { "success" }); - s_logger.info("VM was stopped with the status: " - + success.get("success")); - } else { - s_logger.error("Stop VM test failed with error code: " + responseCode + ". Following URL was sent: " + url); - return responseCode; - } - - //Start centos VM - requestToSign = "apikey=" + encodedApiKey + "&command=startVirtualMachine&id=" + _linuxVmId.get(); - requestToSign = requestToSign.toLowerCase(); - signature = signRequest(requestToSign, _secretKey.get()); - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - url = developerServer + "?command=startVirtualMachine&id=" + _linuxVmId.get() + "&apikey=" - + encodedApiKey + "&signature=" + encodedSignature; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - s_logger.info("Start VM response code: " - + responseCode); + } else { + s_logger.error("get user failed with error code: " + responseCode + ". Following URL was sent: " + url); + return responseCode; + } + + // ----------------------------- + // UPDATE USER + // ----------------------------- + { + url = server + "?command=updateUser&id=" + userId + + "&firstname=delete&lastname=me"; + client = new HttpClient(); + method = new GetMethod(url); + responseCode = client.executeMethod(method); + s_logger.info("update user response code: " + responseCode); + if (responseCode == 200) { + InputStream is = method.getResponseBodyAsStream(); + Map success = getSingleValueFromXML(is, + new String[] { "success" }); + s_logger + .info("update user..success? " + success.get("success")); + } else { + s_logger.error("update user failed with error code: " + responseCode + ". 
Following URL was sent: " + url); + return responseCode; + } + } + + // ----------------------------- + // Execute reboot/stop/start commands for the VMs before deleting the account - made to exercise xen + // ----------------------------- + + //Reboot centos VM + String encodedApiKey = URLEncoder.encode(_apiKey.get(), "UTF-8"); + String requestToSign = "apikey=" + encodedApiKey + "&command=rebootVirtualMachine&id=" + _linuxVmId.get(); + requestToSign = requestToSign.toLowerCase(); + String signature = signRequest(requestToSign, _secretKey.get()); + String encodedSignature = URLEncoder.encode(signature, "UTF-8"); + + url = developerServer + "?command=rebootVirtualMachine&id=" + _linuxVmId.get() + "&apikey=" + + encodedApiKey + "&signature=" + encodedSignature; + client = new HttpClient(); + method = new GetMethod(url); + responseCode = client.executeMethod(method); + s_logger.info("Reboot VM response code: " + + responseCode); + if (responseCode == 200) { + InputStream input = method.getResponseBodyAsStream(); + Element el = queryAsyncJobResult(server, input); + Map success = getSingleValueFromXML(el, + new String[] { "success" }); + s_logger.info("VM was rebooted with the status: " + + success.get("success")); + } else { + s_logger.error(" VM test failed with error code: " + responseCode + ". Following URL was sent: " + url); + return responseCode; + } + + //Stop centos VM + requestToSign = "apikey=" + encodedApiKey + "&command=stopVirtualMachine&id=" + _linuxVmId.get(); + requestToSign = requestToSign.toLowerCase(); + signature = signRequest(requestToSign, _secretKey.get()); + encodedSignature = URLEncoder.encode(signature, "UTF-8"); + + url = developerServer + "?command=stopVirtualMachine&id=" + _linuxVmId.get() + "&apikey=" + + encodedApiKey + "&signature=" + encodedSignature; + client = new HttpClient(); + method = new GetMethod(url); + responseCode = client.executeMethod(method); + s_logger.info("Stop VM response code: " + + responseCode); + if (responseCode == 200) { + InputStream input = method.getResponseBodyAsStream(); + Element el = queryAsyncJobResult(server, input); + Map success = getSingleValueFromXML(el, + new String[] { "success" }); + s_logger.info("VM was stopped with the status: " + + success.get("success")); + } else { + s_logger.error("Stop VM test failed with error code: " + responseCode + ". 
Following URL was sent: " + url); + return responseCode; + } + + //Start centos VM + requestToSign = "apikey=" + encodedApiKey + "&command=startVirtualMachine&id=" + _linuxVmId.get(); + requestToSign = requestToSign.toLowerCase(); + signature = signRequest(requestToSign, _secretKey.get()); + encodedSignature = URLEncoder.encode(signature, "UTF-8"); + + url = developerServer + "?command=startVirtualMachine&id=" + _linuxVmId.get() + "&apikey=" + + encodedApiKey + "&signature=" + encodedSignature; + client = new HttpClient(); + method = new GetMethod(url); + responseCode = client.executeMethod(method); + s_logger.info("Start VM response code: " + + responseCode); + + if (responseCode == 200) { + InputStream input = method.getResponseBodyAsStream(); + Element el = queryAsyncJobResult(server, input); + Map success = getSingleValueFromXML(el, + new String[] { "id" }); + + if (success.get("id") == null) { + s_logger.info("Start linux vm response code: 401"); + return 401; + } + else { + s_logger.info("Start vm response code: " + responseCode); + } + + s_logger.info("VM was started with the status: " + + success.get("success")); + } else { + s_logger.error("Start VM test failed with error code: " + responseCode + ". Following URL was sent: " + url); + return responseCode; + } - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - Map success = getSingleValueFromXML(el, - new String[] { "id" }); - - if (success.get("id") == null) { - s_logger.info("Start linux vm response code: 401"); - return 401; - } - else { - s_logger.info("Start vm response code: " + responseCode); - } - - s_logger.info("VM was started with the status: " - + success.get("success")); - } else { - s_logger.error("Start VM test failed with error code: " + responseCode + ". Following URL was sent: " + url); - return responseCode; - } - //// // ----------------------------- //// // DISABLE USER //// // ----------------------------- @@ -1008,184 +1008,184 @@ public class StressTestDirectAttach { // } // } - // ----------------------------- - // DELETE USER - // ----------------------------- - { - url = server + "?command=deleteUser&id=" + userId; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - s_logger.info("delete user response code: " + responseCode); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - s_logger - .info("Deleted user successfully"); - } else { - s_logger.error("delete user failed with error code: " + responseCode + ". Following URL was sent: " + url); - return responseCode; - } - } - return responseCode; - } + // ----------------------------- + // DELETE USER + // ----------------------------- + { + url = server + "?command=deleteUser&id=" + userId; + client = new HttpClient(); + method = new GetMethod(url); + responseCode = client.executeMethod(method); + s_logger.info("delete user response code: " + responseCode); + if (responseCode == 200) { + InputStream input = method.getResponseBodyAsStream(); + Element el = queryAsyncJobResult(server, input); + s_logger + .info("Deleted user successfully"); + } else { + s_logger.error("delete user failed with error code: " + responseCode + ". 
Following URL was sent: " + url); + return responseCode; + } + } + return responseCode; + } - private static int executeEventsAndBilling(String server, String developerServer) - throws HttpException, IOException { - // test steps: - // - get all the events in the system for all users in the system - // - generate all the usage records in the system - // - get all the usage records in the system + private static int executeEventsAndBilling(String server, String developerServer) + throws HttpException, IOException { + // test steps: + // - get all the events in the system for all users in the system + // - generate all the usage records in the system + // - get all the usage records in the system - // ----------------------------- - // GET EVENTS - // ----------------------------- - String url =server+"?command=listEvents&page=1&account=" + _account.get(); - - s_logger.info("Getting events for the account " + _account.get()); - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(url); - int responseCode = client.executeMethod(method); - s_logger.info("get events response code: " + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map> eventDescriptions = getMultipleValuesFromXML( - is, new String[] { "description" }); - List descriptionText = eventDescriptions.get("description"); - if (descriptionText == null) { - s_logger.info("no events retrieved..."); - } else { - for (String text : descriptionText) { - s_logger.info("event: " + text); - } - } - } else { - s_logger.error("list events failed with error code: " + responseCode + ". Following URL was sent: " + url); - - return responseCode; - } - return responseCode; - } - + // ----------------------------- + // GET EVENTS + // ----------------------------- + String url =server+"?command=listEvents&page=1&account=" + _account.get(); - private static int executeStop(String server, String developerServer, - String username) throws HttpException, IOException { - // test steps: - // - get userId for the given username - // - list virtual machines for the user - // - stop all virtual machines - // - get ip addresses for the user - // - release ip addresses + s_logger.info("Getting events for the account " + _account.get()); + HttpClient client = new HttpClient(); + HttpMethod method = new GetMethod(url); + int responseCode = client.executeMethod(method); + s_logger.info("get events response code: " + responseCode); + if (responseCode == 200) { + InputStream is = method.getResponseBodyAsStream(); + Map> eventDescriptions = getMultipleValuesFromXML( + is, new String[] { "description" }); + List descriptionText = eventDescriptions.get("description"); + if (descriptionText == null) { + s_logger.info("no events retrieved..."); + } else { + for (String text : descriptionText) { + s_logger.info("event: " + text); + } + } + } else { + s_logger.error("list events failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - // ----------------------------- - // GET USER - // ----------------------------- - String userId = _userId.get().toString(); - String encodedUserId = URLEncoder.encode(userId, "UTF-8"); + return responseCode; + } + return responseCode; + } - String url = server + "?command=listUsers&id=" + encodedUserId; - s_logger.info("Stopping resources for user: " + username); - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(url); - int responseCode = client.executeMethod(method); - s_logger.info("get user response code: " + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map userIdValues = getSingleValueFromXML(is, - new String[] { "id" }); - String userIdStr = userIdValues.get("id"); - if (userIdStr != null) { - userId = userIdStr; - if (userId == null) { - s_logger - .error("get user failed to retrieve a valid user id, aborting depolyment test" + ". Following URL was sent: " + url); - return -1; - } - } - } else { - s_logger.error("get user failed with error code: " + responseCode + ". Following URL was sent: " + url); - return responseCode; - } - { - // ---------------------------------- - // LIST VIRTUAL MACHINES - // ---------------------------------- - String encodedApiKey = URLEncoder.encode(_apiKey.get(), "UTF-8"); - String requestToSign = "apikey=" + encodedApiKey - + "&command=listVirtualMachines"; - requestToSign = requestToSign.toLowerCase(); - String signature = signRequest(requestToSign, _secretKey.get()); - String encodedSignature = URLEncoder.encode(signature, "UTF-8"); + private static int executeStop(String server, String developerServer, + String username) throws HttpException, IOException { + // test steps: + // - get userId for the given username + // - list virtual machines for the user + // - stop all virtual machines + // - get ip addresses for the user + // - release ip addresses - url = developerServer + "?command=listVirtualMachines&apikey=" + encodedApiKey + "&signature=" - + encodedSignature; - - s_logger.info("Listing all virtual machines for the user with url " + url); - String[] vmIds = null; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - s_logger.info("list virtual machines response code: " - + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map> vmIdValues = getMultipleValuesFromXML( - is, new String[] { "id" }); - if (vmIdValues.containsKey("id")) { - List vmIdList = vmIdValues.get("id"); - if (vmIdList != null) { - vmIds = new String[vmIdList.size()]; - vmIdList.toArray(vmIds); - String vmIdLogStr = ""; - if ((vmIds != null) && (vmIds.length > 0)) { - vmIdLogStr = vmIds[0]; - for (int i = 1; i < vmIds.length; i++) { - vmIdLogStr = vmIdLogStr + "," + vmIds[i]; - } - } - s_logger.info("got virtual machine ids: " + vmIdLogStr); - } - } - - - } else { - s_logger.error("list virtual machines test failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - - - // ---------------------------------- - // STOP/DESTROY VIRTUAL MACHINES - // ---------------------------------- - if (vmIds != null) { - for (String vmId : vmIds) { - requestToSign = "apikey=" + encodedApiKey + "&command=stopVirtualMachine&id=" + vmId; - requestToSign = requestToSign.toLowerCase(); - signature = signRequest(requestToSign, _secretKey.get()); - encodedSignature = URLEncoder.encode(signature, "UTF-8"); + // ----------------------------- + // GET USER + // ----------------------------- + String userId = _userId.get().toString(); + String encodedUserId = URLEncoder.encode(userId, "UTF-8"); + + String url = server + "?command=listUsers&id=" + encodedUserId; + s_logger.info("Stopping resources for user: " + username); + HttpClient client = new HttpClient(); + HttpMethod method = new GetMethod(url); + int responseCode = client.executeMethod(method); + s_logger.info("get user response code: " + responseCode); + if (responseCode == 200) { + InputStream is = method.getResponseBodyAsStream(); + Map userIdValues = getSingleValueFromXML(is, + new String[] { "id" }); + String userIdStr = userIdValues.get("id"); + if (userIdStr != null) { + userId = userIdStr; + if (userId == null) { + s_logger + .error("get user failed to retrieve a valid user id, aborting depolyment test" + ". Following URL was sent: " + url); + return -1; + } + } + } else { + s_logger.error("get user failed with error code: " + responseCode + ". Following URL was sent: " + url); + return responseCode; + } + + { + // ---------------------------------- + // LIST VIRTUAL MACHINES + // ---------------------------------- + String encodedApiKey = URLEncoder.encode(_apiKey.get(), "UTF-8"); + String requestToSign = "apikey=" + encodedApiKey + + "&command=listVirtualMachines"; + requestToSign = requestToSign.toLowerCase(); + String signature = signRequest(requestToSign, _secretKey.get()); + String encodedSignature = URLEncoder.encode(signature, "UTF-8"); + + url = developerServer + "?command=listVirtualMachines&apikey=" + encodedApiKey + "&signature=" + + encodedSignature; + + s_logger.info("Listing all virtual machines for the user with url " + url); + String[] vmIds = null; + client = new HttpClient(); + method = new GetMethod(url); + responseCode = client.executeMethod(method); + s_logger.info("list virtual machines response code: " + + responseCode); + if (responseCode == 200) { + InputStream is = method.getResponseBodyAsStream(); + Map> vmIdValues = getMultipleValuesFromXML( + is, new String[] { "id" }); + if (vmIdValues.containsKey("id")) { + List vmIdList = vmIdValues.get("id"); + if (vmIdList != null) { + vmIds = new String[vmIdList.size()]; + vmIdList.toArray(vmIds); + String vmIdLogStr = ""; + if ((vmIds != null) && (vmIds.length > 0)) { + vmIdLogStr = vmIds[0]; + for (int i = 1; i < vmIds.length; i++) { + vmIdLogStr = vmIdLogStr + "," + vmIds[i]; + } + } + s_logger.info("got virtual machine ids: " + vmIdLogStr); + } + } + + + } else { + s_logger.error("list virtual machines test failed with error code: " + responseCode + ". 
Following URL was sent: " + url); + return responseCode; + } + + + // ---------------------------------- + // STOP/DESTROY VIRTUAL MACHINES + // ---------------------------------- + if (vmIds != null) { + for (String vmId : vmIds) { + requestToSign = "apikey=" + encodedApiKey + "&command=stopVirtualMachine&id=" + vmId; + requestToSign = requestToSign.toLowerCase(); + signature = signRequest(requestToSign, _secretKey.get()); + encodedSignature = URLEncoder.encode(signature, "UTF-8"); + + url = developerServer + "?command=stopVirtualMachine&id=" + vmId + "&apikey=" + + encodedApiKey + "&signature=" + encodedSignature; + client = new HttpClient(); + method = new GetMethod(url); + responseCode = client.executeMethod(method); + s_logger.info("StopVirtualMachine" + " [" + vmId + "] response code: " + + responseCode); + if (responseCode == 200) { + InputStream input = method.getResponseBodyAsStream(); + Element el = queryAsyncJobResult(server, input); + Map success = getSingleValueFromXML(el, + new String[] { "success" }); + s_logger.info("StopVirtualMachine..success? " + + success.get("success")); + } else { + s_logger.error("Stop virtual machine test failed with error code: " + responseCode + ". Following URL was sent: " + url); + return responseCode; + } + } + } - url = developerServer + "?command=stopVirtualMachine&id=" + vmId + "&apikey=" - + encodedApiKey + "&signature=" + encodedSignature; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - s_logger.info("StopVirtualMachine" + " [" + vmId + "] response code: " - + responseCode); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - Map success = getSingleValueFromXML(el, - new String[] { "success" }); - s_logger.info("StopVirtualMachine..success? " - + success.get("success")); - } else { - s_logger.error("Stop virtual machine test failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - } - } - // { // url = server + "?command=deleteUser&id=" + userId; // client = new HttpClient(); @@ -1202,327 +1202,327 @@ public class StressTestDirectAttach { // return responseCode; // } // } - - - } - _linuxIP.set(""); - _linuxVmId.set(""); - _linuxPassword.set(""); - _windowsIP.set(""); - _secretKey.set(""); - _apiKey.set(""); - _userId.set(Long.parseLong("0")); - _account.set(""); - _domainRouterId.set(""); - return responseCode; - } - public static String signRequest(String request, String key) { - try { - Mac mac = Mac.getInstance("HmacSHA1"); - SecretKeySpec keySpec = new SecretKeySpec(key.getBytes(), - "HmacSHA1"); - mac.init(keySpec); - mac.update(request.getBytes()); - byte[] encryptedBytes = mac.doFinal(); - return Base64.encodeBytes(encryptedBytes); - } catch (Exception ex) { - s_logger.error("unable to sign request", ex); - } - return null; - } + } - private static String sshWinTest(String host) { - if (host == null) { - s_logger - .info("Did not receive a host back from test, ignoring win ssh test"); - return null; - } + _linuxIP.set(""); + _linuxVmId.set(""); + _linuxPassword.set(""); + _windowsIP.set(""); + _secretKey.set(""); + _apiKey.set(""); + _userId.set(Long.parseLong("0")); + _account.set(""); + _domainRouterId.set(""); + return responseCode; + } - // We will retry 5 times before quitting - int retry = 1; + public static String signRequest(String request, String key) { + try { + Mac mac = Mac.getInstance("HmacSHA1"); + SecretKeySpec keySpec = new SecretKeySpec(key.getBytes(), + "HmacSHA1"); + mac.init(keySpec); + mac.update(request.getBytes()); + byte[] encryptedBytes = mac.doFinal(); + return Base64.encodeBase64String(encryptedBytes); + } catch (Exception ex) { + s_logger.error("unable to sign request", ex); + } + return null; + } - while (true) { - try { - if (retry > 0) { - s_logger.info("Retry attempt : " + retry - + " ...sleeping 300 seconds before next attempt. Account is " + _account.get()); - Thread.sleep(300000); - } + private static String sshWinTest(String host) { + if (host == null) { + s_logger + .info("Did not receive a host back from test, ignoring win ssh test"); + return null; + } - s_logger.info("Attempting to SSH into windows host " + host - + " with retry attempt: " + retry + " for account " + _account.get()); + // We will retry 5 times before quitting + int retry = 1; - Connection conn = new Connection(host); - conn.connect(null, 60000, 60000); + while (true) { + try { + if (retry > 0) { + s_logger.info("Retry attempt : " + retry + + " ...sleeping 300 seconds before next attempt. 
Account is " + _account.get()); + Thread.sleep(300000); + } - s_logger.info("User " + _account.get() + " ssHed successfully into windows host " + host); - boolean success = false; - boolean isAuthenticated = conn.authenticateWithPassword( - "Administrator", "password"); - if (isAuthenticated == false) { - return "Authentication failed"; - } - else { - s_logger.info("Authentication is successfull"); - } - - try { - SCPClient scp = new SCPClient(conn); - scp.put("wget.exe", "wget.exe", "C:\\Users\\Administrator", "0777"); - s_logger.info("Successfully put wget.exe file"); - } catch (Exception ex) { - s_logger.error("Unable to put wget.exe " + ex); - } - - if (conn == null ){ - s_logger.error("Connection is null"); - } - Session sess = conn.openSession(); + s_logger.info("Attempting to SSH into windows host " + host + + " with retry attempt: " + retry + " for account " + _account.get()); - s_logger.info("User + " + _account.get() + " executing : wget http://192.168.1.250/dump.bin"); - sess - .execCommand("wget http://192.168.1.250/dump.bin && dir dump.bin"); + Connection conn = new Connection(host); + conn.connect(null, 60000, 60000); - InputStream stdout = sess.getStdout(); - InputStream stderr = sess.getStderr(); + s_logger.info("User " + _account.get() + " ssHed successfully into windows host " + host); + boolean success = false; + boolean isAuthenticated = conn.authenticateWithPassword( + "Administrator", "password"); + if (isAuthenticated == false) { + return "Authentication failed"; + } + else { + s_logger.info("Authentication is successfull"); + } - byte[] buffer = new byte[8192]; - while (true) { - if ((stdout.available() == 0) && (stderr.available() == 0)) { - int conditions = sess.waitForCondition( - ChannelCondition.STDOUT_DATA - | ChannelCondition.STDERR_DATA - | ChannelCondition.EOF, 120000); + try { + SCPClient scp = new SCPClient(conn); + scp.put("wget.exe", "wget.exe", "C:\\Users\\Administrator", "0777"); + s_logger.info("Successfully put wget.exe file"); + } catch (Exception ex) { + s_logger.error("Unable to put wget.exe " + ex); + } - if ((conditions & ChannelCondition.TIMEOUT) != 0) { - s_logger - .info("Timeout while waiting for data from peer."); - return null; - } + if (conn == null ){ + s_logger.error("Connection is null"); + } + Session sess = conn.openSession(); - if ((conditions & ChannelCondition.EOF) != 0) { - if ((conditions & (ChannelCondition.STDOUT_DATA | ChannelCondition.STDERR_DATA)) == 0) { - break; - } - } - } + s_logger.info("User + " + _account.get() + " executing : wget http://192.168.1.250/dump.bin"); + sess + .execCommand("wget http://192.168.1.250/dump.bin && dir dump.bin"); - while (stdout.available() > 0) { - success = true; - int len = stdout.read(buffer); - if (len > 0) // this check is somewhat paranoid - s_logger.info(new String(buffer, 0, len)); - } + InputStream stdout = sess.getStdout(); + InputStream stderr = sess.getStderr(); - while (stderr.available() > 0) { - /* int len = */stderr.read(buffer); - } - } - sess.close(); - conn.close(); + byte[] buffer = new byte[8192]; + while (true) { + if ((stdout.available() == 0) && (stderr.available() == 0)) { + int conditions = sess.waitForCondition( + ChannelCondition.STDOUT_DATA + | ChannelCondition.STDERR_DATA + | ChannelCondition.EOF, 120000); - if (success) { - Thread.sleep(120000); - return null; - } else { - retry++; - if (retry == MAX_RETRY_WIN) { - return "SSH Windows Network test fail for account " + _account.get(); - } - } - } catch (Exception e) { - s_logger.error(e); - retry++; - if 
(retry == MAX_RETRY_WIN) { - return "SSH Windows Network test fail with error " - + e.getMessage(); - } - } - } - } + if ((conditions & ChannelCondition.TIMEOUT) != 0) { + s_logger + .info("Timeout while waiting for data from peer."); + return null; + } - private static String sshTest(String host, String password) { - int i = 0; - if (host == null) { - s_logger - .info("Did not receive a host back from test, ignoring ssh test"); - return null; - } - - if (password == null){ - s_logger.info("Did not receive a password back from test, ignoring ssh test"); - return null; - } + if ((conditions & ChannelCondition.EOF) != 0) { + if ((conditions & (ChannelCondition.STDOUT_DATA | ChannelCondition.STDERR_DATA)) == 0) { + break; + } + } + } - // We will retry 5 times before quitting - String result = null; - int retry = 0; + while (stdout.available() > 0) { + success = true; + int len = stdout.read(buffer); + if (len > 0) // this check is somewhat paranoid + s_logger.info(new String(buffer, 0, len)); + } - while (true) { - try { - if (retry > 0) { - s_logger.info("Retry attempt : " + retry - + " ...sleeping 120 seconds before next attempt. Account is " + _account.get()); - Thread.sleep(120000); - } + while (stderr.available() > 0) { + /* int len = */stderr.read(buffer); + } + } + sess.close(); + conn.close(); - s_logger.info("Attempting to SSH into linux host " + host - + " with retry attempt: " + retry + ". Account is " + _account.get()); + if (success) { + Thread.sleep(120000); + return null; + } else { + retry++; + if (retry == MAX_RETRY_WIN) { + return "SSH Windows Network test fail for account " + _account.get(); + } + } + } catch (Exception e) { + s_logger.error(e); + retry++; + if (retry == MAX_RETRY_WIN) { + return "SSH Windows Network test fail with error " + + e.getMessage(); + } + } + } + } - Connection conn = new Connection(host); - conn.connect(null, 60000, 60000); + private static String sshTest(String host, String password) { + int i = 0; + if (host == null) { + s_logger + .info("Did not receive a host back from test, ignoring ssh test"); + return null; + } - s_logger.info("User + " + _account.get() + " ssHed successfully into linux host " + host); + if (password == null){ + s_logger.info("Did not receive a password back from test, ignoring ssh test"); + return null; + } - boolean isAuthenticated = conn.authenticateWithPassword("root", - password); + // We will retry 5 times before quitting + String result = null; + int retry = 0; - if (isAuthenticated == false) { - s_logger.info("Authentication failed for root with password" + password); - return "Authentication failed"; - - } - - boolean success = false; - String linuxCommand = null; - - if (i % 10 == 0) - linuxCommand = "rm -rf *; wget http://192.168.1.250/dump.bin && ls -al dump.bin"; - else - linuxCommand = "wget http://192.168.1.250/dump.bin && ls -al dump.bin"; - - Session sess = conn.openSession(); - s_logger.info("User " + _account.get() + " executing : " + linuxCommand); - sess.execCommand(linuxCommand); + while (true) { + try { + if (retry > 0) { + s_logger.info("Retry attempt : " + retry + + " ...sleeping 120 seconds before next attempt. Account is " + _account.get()); + Thread.sleep(120000); + } - InputStream stdout = sess.getStdout(); - InputStream stderr = sess.getStderr(); - + s_logger.info("Attempting to SSH into linux host " + host + + " with retry attempt: " + retry + ". 
Account is " + _account.get()); - byte[] buffer = new byte[8192]; - while (true) { - if ((stdout.available() == 0) && (stderr.available() == 0)) { - int conditions = sess.waitForCondition( - ChannelCondition.STDOUT_DATA - | ChannelCondition.STDERR_DATA - | ChannelCondition.EOF, 120000); + Connection conn = new Connection(host); + conn.connect(null, 60000, 60000); - if ((conditions & ChannelCondition.TIMEOUT) != 0) { - s_logger - .info("Timeout while waiting for data from peer."); - return null; - } + s_logger.info("User + " + _account.get() + " ssHed successfully into linux host " + host); - if ((conditions & ChannelCondition.EOF) != 0) { - if ((conditions & (ChannelCondition.STDOUT_DATA | ChannelCondition.STDERR_DATA)) == 0) { - break; - } - } - } + boolean isAuthenticated = conn.authenticateWithPassword("root", + password); - while (stdout.available() > 0) { - success = true; - int len = stdout.read(buffer); - if (len > 0) // this check is somewhat paranoid - s_logger.info(new String(buffer, 0, len)); - } + if (isAuthenticated == false) { + s_logger.info("Authentication failed for root with password" + password); + return "Authentication failed"; - while (stderr.available() > 0) { - /* int len = */stderr.read(buffer); - } - } + } - sess.close(); - conn.close(); - - if (!success) { - retry++; - if (retry == MAX_RETRY_LINUX) { - result = "SSH Linux Network test fail"; - } - } - - return result; - } catch (Exception e) { - retry++; - s_logger.error("SSH Linux Network test fail with error"); - if (retry == MAX_RETRY_LINUX) { - return "SSH Linux Network test fail with error " - + e.getMessage(); - } - } - i++; - } - } + boolean success = false; + String linuxCommand = null; - public static String createMD5Password(String password) { - MessageDigest md5; + if (i % 10 == 0) + linuxCommand = "rm -rf *; wget http://192.168.1.250/dump.bin && ls -al dump.bin"; + else + linuxCommand = "wget http://192.168.1.250/dump.bin && ls -al dump.bin"; - try { - md5 = MessageDigest.getInstance("MD5"); - } catch (NoSuchAlgorithmException e) { - throw new CloudRuntimeException("Error", e); - } + Session sess = conn.openSession(); + s_logger.info("User " + _account.get() + " executing : " + linuxCommand); + sess.execCommand(linuxCommand); - md5.reset(); - BigInteger pwInt = new BigInteger(1, md5.digest(password.getBytes())); + InputStream stdout = sess.getStdout(); + InputStream stderr = sess.getStderr(); - // make sure our MD5 hash value is 32 digits long... - StringBuffer sb = new StringBuffer(); - String pwStr = pwInt.toString(16); - int padding = 32 - pwStr.length(); - for (int i = 0; i < padding; i++) { - sb.append('0'); - } - sb.append(pwStr); - return sb.toString(); - } - - - public static Element queryAsyncJobResult (String host, InputStream inputStream) { - Element returnBody = null; - - Map values = getSingleValueFromXML(inputStream, - new String[] { "jobid" }); - String jobId = values.get("jobid"); - - if (jobId == null) { - s_logger.error("Unable to get a jobId"); - return null; - } - - //s_logger.info("Job id is " + jobId); - String resultUrl = host + "?command=queryAsyncJobResult&jobid=" + jobId; - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(resultUrl); - while (true) { - try { - client.executeMethod(method); - //s_logger.info("Method is executed successfully. 
Following url was sent " + resultUrl); - InputStream is = method.getResponseBodyAsStream(); - DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); - DocumentBuilder builder = factory.newDocumentBuilder(); - Document doc = builder.parse(is); - returnBody = doc.getDocumentElement(); - doc.getDocumentElement().normalize(); - Element jobStatusTag = (Element) returnBody.getElementsByTagName("jobstatus").item(0); - String jobStatus = jobStatusTag.getTextContent(); - if(jobStatus.equals("0")) { - try { - Thread.sleep(1000); - } catch (InterruptedException e) { - } - } else { - break; - } - - } catch (Exception ex) { - s_logger.error(ex); - } - } - return returnBody; - } + + byte[] buffer = new byte[8192]; + while (true) { + if ((stdout.available() == 0) && (stderr.available() == 0)) { + int conditions = sess.waitForCondition( + ChannelCondition.STDOUT_DATA + | ChannelCondition.STDERR_DATA + | ChannelCondition.EOF, 120000); + + if ((conditions & ChannelCondition.TIMEOUT) != 0) { + s_logger + .info("Timeout while waiting for data from peer."); + return null; + } + + if ((conditions & ChannelCondition.EOF) != 0) { + if ((conditions & (ChannelCondition.STDOUT_DATA | ChannelCondition.STDERR_DATA)) == 0) { + break; + } + } + } + + while (stdout.available() > 0) { + success = true; + int len = stdout.read(buffer); + if (len > 0) // this check is somewhat paranoid + s_logger.info(new String(buffer, 0, len)); + } + + while (stderr.available() > 0) { + /* int len = */stderr.read(buffer); + } + } + + sess.close(); + conn.close(); + + if (!success) { + retry++; + if (retry == MAX_RETRY_LINUX) { + result = "SSH Linux Network test fail"; + } + } + + return result; + } catch (Exception e) { + retry++; + s_logger.error("SSH Linux Network test fail with error"); + if (retry == MAX_RETRY_LINUX) { + return "SSH Linux Network test fail with error " + + e.getMessage(); + } + } + i++; + } + } + + public static String createMD5Password(String password) { + MessageDigest md5; + + try { + md5 = MessageDigest.getInstance("MD5"); + } catch (NoSuchAlgorithmException e) { + throw new CloudRuntimeException("Error", e); + } + + md5.reset(); + BigInteger pwInt = new BigInteger(1, md5.digest(password.getBytes())); + + // make sure our MD5 hash value is 32 digits long... + StringBuffer sb = new StringBuffer(); + String pwStr = pwInt.toString(16); + int padding = 32 - pwStr.length(); + for (int i = 0; i < padding; i++) { + sb.append('0'); + } + sb.append(pwStr); + return sb.toString(); + } + + + public static Element queryAsyncJobResult (String host, InputStream inputStream) { + Element returnBody = null; + + Map values = getSingleValueFromXML(inputStream, + new String[] { "jobid" }); + String jobId = values.get("jobid"); + + if (jobId == null) { + s_logger.error("Unable to get a jobId"); + return null; + } + + //s_logger.info("Job id is " + jobId); + String resultUrl = host + "?command=queryAsyncJobResult&jobid=" + jobId; + HttpClient client = new HttpClient(); + HttpMethod method = new GetMethod(resultUrl); + while (true) { + try { + client.executeMethod(method); + //s_logger.info("Method is executed successfully. 
Following url was sent " + resultUrl); + InputStream is = method.getResponseBodyAsStream(); + DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); + DocumentBuilder builder = factory.newDocumentBuilder(); + Document doc = builder.parse(is); + returnBody = doc.getDocumentElement(); + doc.getDocumentElement().normalize(); + Element jobStatusTag = (Element) returnBody.getElementsByTagName("jobstatus").item(0); + String jobStatus = jobStatusTag.getTextContent(); + if(jobStatus.equals("0")) { + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + } + } else { + break; + } + + } catch (Exception ex) { + s_logger.error(ex); + } + } + return returnBody; + } } diff --git a/test/src/com/cloud/test/stress/TestClientWithAPI.java b/test/src/com/cloud/test/stress/TestClientWithAPI.java index 84cb087841b..c1058f82fc6 100644 --- a/test/src/com/cloud/test/stress/TestClientWithAPI.java +++ b/test/src/com/cloud/test/stress/TestClientWithAPI.java @@ -49,7 +49,6 @@ import org.w3c.dom.Element; import org.w3c.dom.Node; import org.w3c.dom.NodeList; -import com.cloud.utils.encoding.Base64; import com.cloud.utils.exception.CloudRuntimeException; import com.trilead.ssh2.ChannelCondition; import com.trilead.ssh2.Connection; @@ -182,7 +181,7 @@ public class TestClientWithAPI { if (arg.equals("-no")) { networkOfferingId = iter.next(); } - + if (arg.equals("-pass")) { vmPassword = iter.next(); } @@ -210,6 +209,7 @@ public class TestClientWithAPI { for (int i = 0; i < numThreads; i++) { new Thread(new Runnable() { + @Override public void run() { do { String username = null; @@ -440,7 +440,7 @@ public class TestClientWithAPI { for (int j = 0; j < childNodes.getLength(); j++) { Node n = childNodes.item(j); if ("id".equals(n.getNodeName())) { - // if ("ipaddress".equals(n.getNodeName())) { + // if ("ipaddress".equals(n.getNodeName())) { ipAddress = n.getTextContent(); } else if ("issourcenat".equals(n.getNodeName())) { isSourceNat = Boolean.parseBoolean(n.getTextContent()); @@ -478,7 +478,7 @@ public class TestClientWithAPI { } else if("ipaddress".equals(n.getNodeName())) { - ipAddress = n.getTextContent(); + ipAddress = n.getTextContent(); } else if ("issourcenat".equals(n.getNodeName())) { isSourceNat = Boolean.parseBoolean(n.getTextContent()); @@ -609,7 +609,7 @@ public class TestClientWithAPI { s_logger.error("Create virtual network failed for account " + username + " with error code :" + responseCode + ", aborting deployment test. The command was sent with url " + url); return -1; } -/* + /* // --------------------------------- // CREATE DIRECT NETWORK // --------------------------------- @@ -629,9 +629,9 @@ public class TestClientWithAPI { s_logger.error("Create direct network failed for account " + username + " with error code :" + responseCode + ", aborting deployment test. The command was sent with url " + url); return -1; } -*/ - - + */ + + // --------------------------------- // DEPLOY LINUX VM // --------------------------------- @@ -706,7 +706,7 @@ public class TestClientWithAPI { } else { - s_logger.info("Associate IP Address response code: " + responseCode); + s_logger.info("Associate IP Address response code: " + responseCode); long publicIpId = Long.parseLong(values.get("id")); s_logger.info("Associate IP's Id: " + publicIpId); _publicIpId.set(values.get("id")); @@ -715,13 +715,13 @@ public class TestClientWithAPI { s_logger.error("associate ip address for windows vm failed with error code: " + responseCode + ". 
Following URL was sent: " + url); return responseCode; } - + String encodedPublicIpId = URLEncoder.encode(_publicIpId.get(), "UTF-8"); requestToSign = "apikey=" + encodedApiKey + "&command=listPublicIpAddresses"+"&id="+ encodedPublicIpId; requestToSign = requestToSign.toLowerCase(); signature = signRequest(requestToSign, _secretKey.get()); encodedSignature = URLEncoder.encode(signature, "UTF-8"); - + url = developerServer + "?command=listPublicIpAddresses&apikey=" + encodedApiKey + "&id=" + encodedPublicIpId + "&signature=" + encodedSignature; client = new HttpClient(); method = new GetMethod(url); @@ -730,9 +730,9 @@ public class TestClientWithAPI { s_logger.info("list ip addresses for user " + userId + " response code: " + responseCode); if (responseCode == 200) { InputStream is = method.getResponseBodyAsStream(); - // InputStream ips = method.getResponseBodyAsStream(); + // InputStream ips = method.getResponseBodyAsStream(); List ipAddressValues = getIPs(is, false); - // List ipAddressVals = getIPs(is, false, true); + // List ipAddressVals = getIPs(is, false, true); if ((ipAddressValues != null) && !ipAddressValues.isEmpty()) { _windowsIpId.set(ipAddressValues.get(0)); _windowsIP.set(ipAddressValues.get(1)); @@ -743,7 +743,7 @@ public class TestClientWithAPI { s_logger.error("list ip addresses failed with error code: " + responseCode + ". Following URL was sent: " + url); return responseCode; } - + // --------------------------------- // Use the SourceNat IP for linux // --------------------------------- @@ -776,17 +776,17 @@ public class TestClientWithAPI { return responseCode; } } - + //-------------------------------------------- // Enable Static NAT for the Source NAT Ip //-------------------------------------------- String encodedSourceNatPublicIpId = URLEncoder.encode(_linuxIpId.get(), "UTF-8"); - - /* requestToSign = "apikey=" + encodedApiKey + "&command=enableStaticNat"+"&id=" + encodedSourceNatPublicIpId + "&virtualMachineId=" + encodedVmId;; + + /* requestToSign = "apikey=" + encodedApiKey + "&command=enableStaticNat"+"&id=" + encodedSourceNatPublicIpId + "&virtualMachineId=" + encodedVmId;; requestToSign = requestToSign.toLowerCase(); signature = signRequest(requestToSign, _secretKey.get()); encodedSignature = URLEncoder.encode(signature, "UTF-8"); - + url = developerServer + "?command=enableStaticNat&apikey=" + encodedApiKey + "&signature=" + encodedSignature + "&id=" + encodedSourceNatPublicIpId + "&virtualMachineId=" + encodedVmId; client = new HttpClient(); method = new GetMethod(url); @@ -801,7 +801,7 @@ public class TestClientWithAPI { s_logger.error("Enable Static NAT failed with error code: " + responseCode + ". Following URL was sent: " + url); return responseCode; } - */ + */ // ------------------------------------------------------------- // CREATE IP FORWARDING RULE -- Linux VM // ------------------------------------------------------------- @@ -825,7 +825,7 @@ public class TestClientWithAPI { long ipfwdid = Long.parseLong(values.get("id")); s_logger.info("got Port Forwarding Rule's Id:" + ipfwdid); _linipfwdid.set(values.get("id")); - + } else { s_logger.error("Port forwarding rule creation failed with error code: " + responseCode + ". 
Following URL was sent: " + url); return responseCode; @@ -882,7 +882,7 @@ public class TestClientWithAPI { String encodedTemplateId = URLEncoder.encode("" + templateId, "UTF-8"); encodedApiKey = URLEncoder.encode(_apiKey.get(), "UTF-8"); String encodedNetworkIds = URLEncoder.encode(_networkId.get()+",206","UTF-8"); - + requestToSign = "apikey=" + encodedApiKey + "&command=deployVirtualMachine&diskofferingid=" + diskOfferingId + "&networkids=" + encodedNetworkIds + "&serviceofferingid=" + encodedServiceOfferingId + "&templateid=" + encodedTemplateId + "&zoneid=" + encodedZoneId; requestToSign = requestToSign.toLowerCase(); @@ -918,14 +918,14 @@ public class TestClientWithAPI { //-------------------------------------------- // Enable Static NAT for the Non Source NAT Ip //-------------------------------------------- - + encodedVmId = URLEncoder.encode(_windowsVmId.get(), "UTF-8"); encodedPublicIpId = URLEncoder.encode(_publicIpId.get(), "UTF-8"); requestToSign = "apikey=" + encodedApiKey + "&command=enableStaticNat"+"&ipaddressid="+ encodedPublicIpId + "&virtualMachineId=" + encodedVmId; requestToSign = requestToSign.toLowerCase(); signature = signRequest(requestToSign, _secretKey.get()); encodedSignature = URLEncoder.encode(signature, "UTF-8"); - + url = developerServer + "?command=enableStaticNat&apikey=" + encodedApiKey + "&ipaddressid=" + encodedPublicIpId + "&signature=" + encodedSignature + "&virtualMachineId=" + encodedVmId; client = new HttpClient(); method = new GetMethod(url); @@ -941,7 +941,7 @@ public class TestClientWithAPI { return responseCode; } - + // ------------------------------------------------------------- // CREATE IP FORWARDING RULE -- Windows VM // ------------------------------------------------------------- @@ -972,7 +972,7 @@ public class TestClientWithAPI { s_logger.error("Port forwarding rule creation failed with error code: " + responseCode + ". Following URL was sent: " + url); return responseCode; } - } + } } return responseCode; } @@ -1178,7 +1178,7 @@ public class TestClientWithAPI { } // Create volume from the snapshot created on the previous step and attach it to the running vm - /* encodedApiKey = URLEncoder.encode(_apiKey.get(), "UTF-8"); + /* encodedApiKey = URLEncoder.encode(_apiKey.get(), "UTF-8"); requestToSign = "apikey=" + encodedApiKey + "&command=createVolume&name=" + _account.get() + "&snapshotid=" + _snapshot.get(); requestToSign = requestToSign.toLowerCase(); signature = signRequest(requestToSign, _secretKey.get()); @@ -1222,7 +1222,7 @@ public class TestClientWithAPI { return responseCode; } } -*/ + */ // ----------------------------- // Execute reboot/stop/start commands for the VMs before deleting the account - made to exercise xen // ----------------------------- @@ -1896,7 +1896,7 @@ public class TestClientWithAPI { InputStream input = method.getResponseBodyAsStream(); Element el = queryAsyncJobResult(server, input); s_logger.info("IP forwarding rule was successfully deleted"); - + } else { s_logger.error("IP forwarding rule creation failed with error code: " + responseCode + ". 
Following URL was sent: " + url); return responseCode; @@ -1911,7 +1911,7 @@ public class TestClientWithAPI { requestToSign = requestToSign.toLowerCase(); signature = signRequest(requestToSign, _secretKey.get()); encodedSignature = URLEncoder.encode(signature, "UTF-8"); - + url = developerServer + "?command=disableStaticNat&apikey=" + encodedApiKey + "&id=" + encodedPublicIpId + "&signature=" + encodedSignature ; client = new HttpClient(); method = new GetMethod(url); @@ -1926,7 +1926,7 @@ public class TestClientWithAPI { s_logger.error("Disable Static NAT failed with error code: " + responseCode + ". Following URL was sent: " + url); return responseCode; } - + // ----------------------------------------- // DISASSOCIATE IP ADDRESSES // ----------------------------------------- @@ -1946,7 +1946,7 @@ public class TestClientWithAPI { InputStream input = method.getResponseBodyAsStream(); Element disassocipel = queryAsyncJobResult(server, input); Map success = getSingleValueFromXML(disassocipel, new String[] {"success"}); - // Map success = getSingleValueFromXML(input, new String[] { "success" }); + // Map success = getSingleValueFromXML(input, new String[] { "success" }); s_logger.info("disassociate ip address..success? " + success.get("success")); } else { s_logger.error("disassociate ip address failed with error code: " + responseCode + ". Following URL was sent: " + url); @@ -1977,7 +1977,7 @@ public class TestClientWithAPI { mac.init(keySpec); mac.update(request.getBytes()); byte[] encryptedBytes = mac.doFinal(); - return Base64.encodeBytes(encryptedBytes); + return org.apache.commons.codec.binary.Base64.encodeBase64String(encryptedBytes); } catch (Exception ex) { s_logger.error("unable to sign request", ex); } diff --git a/test/src/com/cloud/test/utils/UtilsForTest.java b/test/src/com/cloud/test/utils/UtilsForTest.java index 500cea587c7..9926444ffd0 100644 --- a/test/src/com/cloud/test/utils/UtilsForTest.java +++ b/test/src/com/cloud/test/utils/UtilsForTest.java @@ -30,208 +30,208 @@ import javax.crypto.spec.SecretKeySpec; import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.DocumentBuilderFactory; +import org.apache.commons.codec.binary.Base64; import org.w3c.dom.Document; import org.w3c.dom.Element; import org.w3c.dom.Node; import org.w3c.dom.NodeList; -import com.cloud.utils.encoding.Base64; import com.cloud.utils.exception.CloudRuntimeException; public class UtilsForTest { - - private static DocumentBuilderFactory factory = DocumentBuilderFactory - .newInstance(); - - public static boolean verifyTags (Map params) { - boolean result = true; - for (String value : params.keySet()) { - if (params.get(value) == null) { - result=false; - } - } - return result; - } - - public static boolean verifyTagValues (Map params, Map pattern) { - boolean result = true; - - if (pattern != null) { - for (String value : pattern.keySet()) { - if (!pattern.get(value).equals(params.get(value))) { - result=false; - System.out.println("Tag " + value + " has " + params.get(value) + " while expected value is: " + pattern.get(value)); - } - } - } - return result; - } - - - public static Map parseXML(InputStream is, - String[] tagNames) { - Map returnValues = new HashMap(); - try { - DocumentBuilder docBuilder = factory.newDocumentBuilder(); - Document doc = docBuilder.parse(is); - Element rootElement = doc.getDocumentElement(); - for (int i = 0; i < tagNames.length; i++) { - NodeList targetNodes = rootElement - .getElementsByTagName(tagNames[i]); - if (targetNodes.getLength() <= 0) { - 
System.out.println("no " + tagNames[i] - + " tag in the response"); - returnValues.put(tagNames[i], null); - } else { - returnValues.put(tagNames[i], targetNodes.item(0) - .getTextContent()); - } - } - } catch (Exception ex) { - System.out.println("error processing XML"); - ex.printStackTrace(); - } - return returnValues; - } - - - public static ArrayList> parseMulXML (InputStream is, String[] tagNames){ - ArrayList> returnValues = new ArrayList>(); - - try { - DocumentBuilder docBuilder = factory.newDocumentBuilder(); - Document doc = docBuilder.parse(is); - Element rootElement = doc.getDocumentElement(); - for (int i = 0; i < tagNames.length; i++) { - NodeList targetNodes = rootElement - .getElementsByTagName(tagNames[i]); - if (targetNodes.getLength() <= 0) { - System.out.println("no " + tagNames[i] - + " tag in XML response...returning null"); - } else { - for (int j = 0; j < targetNodes.getLength(); j++) { - HashMap valueList = new HashMap (); - Node node = targetNodes.item(j); - //parse child nodes - NodeList child = node.getChildNodes(); - for (int c=0; c params) { + boolean result = true; + for (String value : params.keySet()) { + if (params.get(value) == null) { + result=false; + } + } + return result; + } - // make sure our MD5 hash value is 32 digits long... - StringBuffer sb = new StringBuffer(); - String pwStr = pwInt.toString(16); - int padding = 32 - pwStr.length(); - for (int i = 0; i < padding; i++) { - sb.append('0'); - } - sb.append(pwStr); - return sb.toString(); - } - - - - - - - - public static Map getSingleValueFromXML(InputStream is, - String[] tagNames) { - Map returnValues = new HashMap(); - try { - DocumentBuilder docBuilder = factory.newDocumentBuilder(); - Document doc = docBuilder.parse(is); - Element rootElement = doc.getDocumentElement(); + public static boolean verifyTagValues (Map params, Map pattern) { + boolean result = true; + + if (pattern != null) { + for (String value : pattern.keySet()) { + if (!pattern.get(value).equals(params.get(value))) { + result=false; + System.out.println("Tag " + value + " has " + params.get(value) + " while expected value is: " + pattern.get(value)); + } + } + } + return result; + } + + + public static Map parseXML(InputStream is, + String[] tagNames) { + Map returnValues = new HashMap(); + try { + DocumentBuilder docBuilder = factory.newDocumentBuilder(); + Document doc = docBuilder.parse(is); + Element rootElement = doc.getDocumentElement(); + + for (int i = 0; i < tagNames.length; i++) { + NodeList targetNodes = rootElement + .getElementsByTagName(tagNames[i]); + if (targetNodes.getLength() <= 0) { + System.out.println("no " + tagNames[i] + + " tag in the response"); + returnValues.put(tagNames[i], null); + } else { + returnValues.put(tagNames[i], targetNodes.item(0) + .getTextContent()); + } + } + } catch (Exception ex) { + System.out.println("error processing XML"); + ex.printStackTrace(); + } + return returnValues; + } + + + public static ArrayList> parseMulXML (InputStream is, String[] tagNames){ + ArrayList> returnValues = new ArrayList>(); + + try { + DocumentBuilder docBuilder = factory.newDocumentBuilder(); + Document doc = docBuilder.parse(is); + Element rootElement = doc.getDocumentElement(); + for (int i = 0; i < tagNames.length; i++) { + NodeList targetNodes = rootElement + .getElementsByTagName(tagNames[i]); + if (targetNodes.getLength() <= 0) { + System.out.println("no " + tagNames[i] + + " tag in XML response...returning null"); + } else { + for (int j = 0; j < targetNodes.getLength(); j++) { + HashMap 
valueList = new HashMap (); + Node node = targetNodes.item(j); + //parse child nodes + NodeList child = node.getChildNodes(); + for (int c=0; c getSingleValueFromXML(InputStream is, + String[] tagNames) { + Map returnValues = new HashMap(); + try { + DocumentBuilder docBuilder = factory.newDocumentBuilder(); + Document doc = docBuilder.parse(is); + Element rootElement = doc.getDocumentElement(); + + for (int i = 0; i < tagNames.length; i++) { + NodeList targetNodes = rootElement + .getElementsByTagName(tagNames[i]); + if (targetNodes.getLength() <= 0) { + System.out.println("no " + tagNames[i] + + " tag in XML response...returning null"); + } else { + returnValues.put(tagNames[i], targetNodes.item(0) + .getTextContent()); + } + } + } catch (Exception ex) { + System.out.println("error processing XML"); + ex.printStackTrace(); + } + return returnValues; + } + + + public static Map> getMultipleValuesFromXML( + InputStream is, String[] tagNames) { + Map> returnValues = new HashMap>(); + try { + DocumentBuilder docBuilder = factory.newDocumentBuilder(); + Document doc = docBuilder.parse(is); + Element rootElement = doc.getDocumentElement(); + for (int i = 0; i < tagNames.length; i++) { + NodeList targetNodes = rootElement + .getElementsByTagName(tagNames[i]); + if (targetNodes.getLength() <= 0) { + System.out.println("no " + tagNames[i] + + " tag in XML response...returning null"); + } else { + List valueList = new ArrayList(); + for (int j = 0; j < targetNodes.getLength(); j++) { + Node node = targetNodes.item(j); + valueList.add(node.getTextContent()); + } + returnValues.put(tagNames[i], valueList); + } + } + } catch (Exception ex) { + System.out.println(ex); + } + return returnValues; + } + + + + public static String signRequest(String request, String key) { + try { + Mac mac = Mac.getInstance("HmacSHA1"); + SecretKeySpec keySpec = new SecretKeySpec(key.getBytes(), + "HmacSHA1"); + mac.init(keySpec); + mac.update(request.getBytes()); + byte[] encryptedBytes = mac.doFinal(); + //System.out.println("HmacSHA1 hash: " + encryptedBytes); + return Base64.encodeBase64String(encryptedBytes); + } catch (Exception ex) { + System.out.println("unable to sign request"); + ex.printStackTrace(); + } + return null; + } - for (int i = 0; i < tagNames.length; i++) { - NodeList targetNodes = rootElement - .getElementsByTagName(tagNames[i]); - if (targetNodes.getLength() <= 0) { - System.out.println("no " + tagNames[i] - + " tag in XML response...returning null"); - } else { - returnValues.put(tagNames[i], targetNodes.item(0) - .getTextContent()); - } - } - } catch (Exception ex) { - System.out.println("error processing XML"); - ex.printStackTrace(); - } - return returnValues; - } - - - public static Map> getMultipleValuesFromXML( - InputStream is, String[] tagNames) { - Map> returnValues = new HashMap>(); - try { - DocumentBuilder docBuilder = factory.newDocumentBuilder(); - Document doc = docBuilder.parse(is); - Element rootElement = doc.getDocumentElement(); - for (int i = 0; i < tagNames.length; i++) { - NodeList targetNodes = rootElement - .getElementsByTagName(tagNames[i]); - if (targetNodes.getLength() <= 0) { - System.out.println("no " + tagNames[i] - + " tag in XML response...returning null"); - } else { - List valueList = new ArrayList(); - for (int j = 0; j < targetNodes.getLength(); j++) { - Node node = targetNodes.item(j); - valueList.add(node.getTextContent()); - } - returnValues.put(tagNames[i], valueList); - } - } - } catch (Exception ex) { - System.out.println(ex); - } - return returnValues; - 
} - - - - public static String signRequest(String request, String key) { - try { - Mac mac = Mac.getInstance("HmacSHA1"); - SecretKeySpec keySpec = new SecretKeySpec(key.getBytes(), - "HmacSHA1"); - mac.init(keySpec); - mac.update(request.getBytes()); - byte[] encryptedBytes = mac.doFinal(); - //System.out.println("HmacSHA1 hash: " + encryptedBytes); - return Base64.encodeBytes(encryptedBytes); - } catch (Exception ex) { - System.out.println("unable to sign request"); - ex.printStackTrace(); - } - return null; - } - } diff --git a/tools/devcloud/README b/tools/devcloud/README index ef2bcc00574..b0161543284 100644 --- a/tools/devcloud/README +++ b/tools/devcloud/README @@ -21,6 +21,9 @@ NOTE - This folder is a work in progress. The project has not determined how to best establish a nightly DevCloud build process, or how to distribute the image. +=========================================================== +Contents: + This folder contains various scripts used to build the devcloud image. devcloudsetup.sh - the origional devcloud build script (assumes an Ubuntu 12.04 VM image) @@ -28,3 +31,26 @@ build_vagrant_basebox.sh - a script that uses VirtualBox, VeeWee, Vagrant (patch veewee - configuration files used to build a basic Ubuntu 12.04 vagrant box via VeeWee basebuild - The Vagrantfile and puppet module that gets applied to the basic Ubuntu 12.04 box devcloudbox - The Vagrantfile and puppet module that is used with the [hopefully] distributed devcloud base box + +=========================================================== +Instructions: + +To build a "devcloud base box", you need a system with VirtualBox and rvm +installed (use ruby 1.9.2). Run build_vagrant_basebox.sh to build the base box. + +To use the "devcloud base box" that is created in the previous step, you +need to have installed a forked version of Vagrant (until we make the changes +into plugins instead of direct source patches) that can be found here: + + +Once installed per the Vagrant installation process, run: + +vagrant box add devcloud [path to devcloud.box] + +Then, either go into the devcloudbox folder of your checked-out +version of the CloudStack code (incubator-cloudstack/tools/devcloud/devcloudbox), +or copy the contents of that folder to another location. + +Assuming the patched Vagrant installation is working, you then +simply run "vagrant up" from within that directory. 
+ diff --git a/tools/devcloud/devcloud.cfg b/tools/devcloud/devcloud.cfg new file mode 100644 index 00000000000..c0ea6c0b9f5 --- /dev/null +++ b/tools/devcloud/devcloud.cfg @@ -0,0 +1,91 @@ +{ + "zones": [ + { + "name": "DevCloud0", + "providers": [ + { + "broadcastdomainrange": "ZONE", + "name": "VirtualRouter" + } + ], + "dns2": "4.4.4.4", + "dns1": "8.8.8.8", + "networktype": "Basic", + "pods": [ + { + "endip": "10.0.2.220", + "name": "test00", + "startip": "10.0.2.200", + "guestIpRanges": [ + { + "startip": "10.0.2.100", + "endip": "10.0.2.199", + "netmask": "255.255.255.0", + "gateway": "10.0.2.2" + } + ], + "netmask": "255.255.255.0", + "clusters": [ + { + "clustername": "test000", + "hypervisor": "XenServer", + "hosts": [ + { + "username": "root", + "url": "http://10.0.2.15/", + "password": "password" + } + ], + "clustertype": "CloudManaged" + } + ], + "gateway": "10.0.2.2" + } + ], + "internaldns1": "10.0.2.3", + "internaldns2": "10.0.2.3", + "secondaryStorages": [ + { + "url": "nfs://10.0.2.15/opt/storage/secondary" + } + ] + } + ], + "dbSvr": { + "dbSvr": "localhost", + "passwd": "cloud", + "db": "cloud", + "port": 3306, + "user": "cloud" + }, + "logger": [ + { + "name": "TestClient", + "file": "/tmp/testclient.log" + }, + { + "name": "TestCase", + "file": "/tmp/testcase.log" + } + ], + "globalConfig": [ + { + "name": "expunge.workers", + "value": "3" + }, + { + "name": "expunge.delay", + "value": "60" + }, + { + "name": "expunge.interval", + "value": "60" + } + ], + "mgtSvr": [ + { + "mgtSvrIp": "127.0.0.1", + "port": 8096 + } + ] +} diff --git a/ui/scripts/ui/widgets/detailView.js b/ui/scripts/ui/widgets/detailView.js index eb39b80c05a..fc2ae459243 100644 --- a/ui/scripts/ui/widgets/detailView.js +++ b/ui/scripts/ui/widgets/detailView.js @@ -846,7 +846,7 @@ context: context }) : true ) : true; - if ($actions.find('div.action').size() || (detailViewArgs.viewAll && showViewAll)) { + if ($actions && ($actions.find('div.action').size() || (detailViewArgs.viewAll && showViewAll))) { $actions.prependTo($firstRow.closest('div.detail-group').closest('.details')); } if (detailViewArgs.viewAll && showViewAll) { diff --git a/ui/scripts/ui/widgets/multiEdit.js b/ui/scripts/ui/widgets/multiEdit.js index 53e01eb0329..55f9917c427 100644 --- a/ui/scripts/ui/widgets/multiEdit.js +++ b/ui/scripts/ui/widgets/multiEdit.js @@ -785,6 +785,11 @@ $('
').addClass('button add-vm custom-action') .html(_l(field.custom.buttonLabel)) .click(function() { + if (field.custom.requireValidation && + !$multiForm.valid()) return false; + + var formData = getMultiData($multi); + field.custom.action({ context: context, data: $td.data('multi-custom-data'), @@ -794,6 +799,8 @@ } } }); + + return false; }).appendTo($td); } else if (field.addButton) { $addVM = $('
').addClass('button add-vm').html( diff --git a/utils/src/com/cloud/utils/component/ComponentLocator.java b/utils/src/com/cloud/utils/component/ComponentLocator.java index 6669c0ed5f1..dea34d9ebea 100755 --- a/utils/src/com/cloud/utils/component/ComponentLocator.java +++ b/utils/src/com/cloud/utils/component/ComponentLocator.java @@ -71,15 +71,48 @@ import com.cloud.utils.mgmt.JmxUtil; import com.cloud.utils.mgmt.ManagementBean; /** - * ComponentLocator manages all of the adapters within a system. It operates on - * top of an components.xml and uses reflection to instantiate all of the - * adapters. It also supports rereading of all of the adapters. + * ComponentLocator ties together several different concepts. First, it + * deals with how a system should be put together. It manages different + * types of components: + * - Manager: Singleton implementation of a certain process. + * - Adapter: Different singleton implementations for the same functions. + * - SystemIntegrityChecker: Singletons that are called at load time. + * - Dao: Data Access Objects. + * + * These components can be declared in several ways: + * - ComponentLibrary - A Java class that declares the above components. The + * advantage of declaring components here is that they change automatically + * with any refactoring. + * - components specification - An XML file that overrides the + * ComponentLibrary. The advantage of declaring components here is that + * they can be changed by hand on every deployment. + * + * The two are NOT mutually exclusive. ComponentLocator basically locates + * the components specification, which specifies the ComponentLibrary within. + * Components found in the ComponentLibrary are overridden by components + * found in the components specification. + * + * Components specifications can also be nested. One components specification + * can point to another components specification and, therefore, "inherits" + * those components but can still override one or more of them. ComponentLocator + * reads the child components specification first and follows the chain up; + * the child's components override the ones in the parent. + * + * ComponentLocator looks for the components specification as follows: + * 1. By following the path specified by "cloud-stack-components-specification" + * within the environment.properties file. + * 2. By looking for components.xml in the class path. + * + * ComponentLocator also ties in component injection. Components can use + * the @Inject annotation to refer to components ComponentLocator knows about. When + * instantiating components, ComponentLocator attempts to inject these + * components. 
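For illustration, a minimal sketch of a component wired the way the comment above describes. The names FooManager, FooManagerImpl and FooDao are hypothetical, and the annotation packages (javax.ejb.Local, com.cloud.utils.component.Inject) are assumptions about where the Local and Inject annotations that ComponentLocator reads actually live:

import javax.ejb.Local;                      // assumed package of the Local annotation
import com.cloud.utils.component.Inject;     // assumed package of the Inject annotation

interface FooManager { }                     // hypothetical manager interface
interface FooDao { }                         // hypothetical DAO interface

@Local(value = { FooManager.class })
public class FooManagerImpl implements FooManager {
    // ComponentLocator fills this field while instantiating the singleton,
    // provided a FooDao component is declared in the ComponentLibrary or in
    // the components specification (components.xml).
    @Inject
    private FooDao _fooDao;
}

The matching ComponentLibrary or components.xml entry is what lets ComponentLocator discover FooManagerImpl in the first place; the injected field is then resolved against whichever FooDao implementation that same specification declares.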
* **/ @SuppressWarnings("unchecked") public class ComponentLocator implements ComponentLocatorMBean { protected static final Logger s_logger = Logger.getLogger(ComponentLocator.class); - + protected static final ThreadLocal s_tl = new ThreadLocal(); protected static final ConcurrentHashMap, Singleton> s_singletons = new ConcurrentHashMap, Singleton>(111); protected static final HashMap s_locators = new HashMap(); @@ -90,7 +123,7 @@ public class ComponentLocator implements ComponentLocatorMBean { protected static CallbackFilter s_callbackFilter = new DatabaseCallbackFilter(); protected static final List> s_interceptors = new ArrayList>(); protected static CleanupThread s_janitor = null; - + protected HashMap> _adapterMap; protected HashMap> _managerMap; protected LinkedHashMap> _checkerMap; @@ -98,8 +131,8 @@ public class ComponentLocator implements ComponentLocatorMBean { protected String _serverName; protected Object _component; protected HashMap, Class> _factories; - protected HashMap> _pluggableServicesMap; - + protected HashMap> _pluginsMap; + static { if (s_janitor == null) { s_janitor = new CleanupThread(); @@ -118,7 +151,7 @@ public class ComponentLocator implements ComponentLocatorMBean { public String getLocatorName() { return _serverName; } - + @Override public String getName() { return getLocatorName(); @@ -133,7 +166,7 @@ public class ComponentLocator implements ComponentLocatorMBean { _checkerMap = new LinkedHashMap>(); _adapterMap = new HashMap>(); _factories = new HashMap, Class>(); - _pluggableServicesMap = new LinkedHashMap>(); + _pluginsMap = new LinkedHashMap>(); File file = PropertiesUtil.findConfigFile(filename); if (file == null) { s_logger.info("Unable to find " + filename); @@ -157,7 +190,7 @@ public class ComponentLocator implements ComponentLocatorMBean { _daoMap.putAll(parentLocator._daoMap); _managerMap.putAll(parentLocator._managerMap); _factories.putAll(parentLocator._factories); - _pluggableServicesMap.putAll(parentLocator._pluggableServicesMap); + _pluginsMap.putAll(parentLocator._pluginsMap); } ComponentLibrary library = null; @@ -168,15 +201,15 @@ public class ComponentLocator implements ComponentLocatorMBean { _managerMap.putAll(library.getManagers()); adapters.putAll(library.getAdapters()); _factories.putAll(library.getFactories()); - _pluggableServicesMap.putAll(library.getPluggableServices()); + _pluginsMap.putAll(library.getPluggableServices()); } _daoMap.putAll(handler.daos); _managerMap.putAll(handler.managers); _checkerMap.putAll(handler.checkers); adapters.putAll(handler.adapters); - _pluggableServicesMap.putAll(handler.pluggableServices); - + _pluginsMap.putAll(handler.pluggableServices); + return new Pair>>>(handler, adapters); } catch (ParserConfigurationException e) { s_logger.error("Unable to load " + _serverName + " due to errors while parsing " + filename, e); @@ -203,7 +236,9 @@ public class ComponentLocator implements ComponentLocatorMBean { s_logger.info("Skipping configuration using " + filename); return; } - + + instantiatePluggableServices(); + XmlHandler handler = result.first(); HashMap>> adapters = result.second(); try { @@ -220,13 +255,12 @@ public class ComponentLocator implements ComponentLocatorMBean { startAdapters(); //TODO do we need to follow the instantiate -> inject -> configure -> start -> stop flow of singletons like managers/adapters? //TODO do we need to expose pluggableServices to MBean (provide getNames?) 
- instantiatePluggableServices(); } catch (CloudRuntimeException e) { s_logger.error("Unable to load configuration for " + _serverName + " from " + filename, e); System.exit(1); } catch (Exception e) { - s_logger.error("Unable to load configuration for " + _serverName + " from " + filename, e); - System.exit(1); + s_logger.error("Unable to load configuration for " + _serverName + " from " + filename, e); + System.exit(1); } } @@ -288,7 +322,7 @@ public class ComponentLocator implements ComponentLocatorMBean { } } } - + private static Object createInstance(Class clazz, boolean inject, boolean singleton, Object... args) { Factory factory = null; Singleton entity = null; @@ -313,8 +347,8 @@ public class ComponentLocator implements ComponentLocatorMBean { factory = info.factory; } } - - + + Class[] argTypes = null; if (args != null && args.length > 0) { Constructor[] constructors = clazz.getConstructors(); @@ -334,49 +368,49 @@ public class ComponentLocator implements ComponentLocatorMBean { } } } - + if (argTypes == null) { throw new CloudRuntimeException("Unable to find constructor to match parameters given: " + clazz.getName()); } - + entity = new Singleton(factory.newInstance(argTypes, args, s_callbacks)); } else { entity = new Singleton(factory.newInstance(s_callbacks)); } - + if (inject) { inject(clazz, entity.singleton); entity.state = Singleton.State.Injected; } - + if (singleton) { synchronized(s_factories) { s_singletons.put(clazz, entity); } } - + return entity.singleton; } - + protected ComponentInfo> getDao(String name) { ComponentInfo> info = _daoMap.get(name); if (info == null) { throw new CloudRuntimeException("Unable to find DAO " + name); } - + return info; } public static synchronized Object getComponent(String componentName) { - synchronized(_hasCheckerRun) { - /* System Integrity checker will run before all components really loaded */ - if (!_hasCheckerRun && !componentName.equalsIgnoreCase(SystemIntegrityChecker.Name)) { - ComponentLocator.getComponent(SystemIntegrityChecker.Name); - _hasCheckerRun = true; - } - } - + synchronized(_hasCheckerRun) { + /* System Integrity checker will run before all components really loaded */ + if (!_hasCheckerRun && !componentName.equalsIgnoreCase(SystemIntegrityChecker.Name)) { + ComponentLocator.getComponent(SystemIntegrityChecker.Name); + _hasCheckerRun = true; + } + } + ComponentLocator locator = s_locators.get(componentName); if (locator == null) { locator = ComponentLocator.getLocator(componentName); @@ -441,10 +475,10 @@ public class ComponentLocator implements ComponentLocatorMBean { } } } - + protected static void inject(Class clazz, Object entity) { ComponentLocator locator = ComponentLocator.getCurrentLocator(); - + do { Field[] fields = clazz.getDeclaredFields(); for (Field field : fields) { @@ -467,11 +501,11 @@ public class ComponentLocator implements ComponentLocatorMBean { s_logger.trace("Other:" + fc.getName()); instance = locator.getManager(fc); } - + if (instance == null) { throw new CloudRuntimeException("Unable to inject " + fc.getSimpleName() + " in " + clazz.getSimpleName()); } - + try { field.setAccessible(true); field.set(entity, instance); @@ -545,7 +579,7 @@ public class ComponentLocator implements ComponentLocatorMBean { } return (T)info.instance; } - + protected void configureAdapters() { for (Adapters adapters : _adapterMap.values()) { List> infos = adapters._infos; @@ -597,7 +631,7 @@ public class ComponentLocator implements ComponentLocatorMBean { _adapterMap.put(entry.getKey(), adapters); } } - + protected 
void instantiateAdapters(Map>> map) { Set>>> entries = map.entrySet(); for (Map.Entry>> entry : entries) { @@ -641,18 +675,26 @@ public class ComponentLocator implements ComponentLocatorMBean { } protected void instantiatePluggableServices() { - Set>> entries = _pluggableServicesMap.entrySet(); + Set>> entries = _pluginsMap.entrySet(); for (Map.Entry> entry : entries) { ComponentInfo info = entry.getValue(); if (info.instance == null) { s_logger.info("Instantiating PluggableService: " + info.name); info.instance = (PluggableService)createInstance(info.clazz, false, info.singleton); + + if (info.instance instanceof Plugin) { + Plugin plugin = (Plugin)info.instance; + + ComponentLibrary lib = plugin.getComponentLibrary(); + _managerMap.putAll(lib.getManagers()); + _daoMap.putAll(lib.getDaos()); + } } } } - + protected ComponentInfo getPluggableService(String name) { - ComponentInfo mgr = _pluggableServicesMap.get(name); + ComponentInfo mgr = _pluginsMap.get(name); return mgr; } @@ -669,7 +711,7 @@ public class ComponentLocator implements ComponentLocatorMBean { public List getAllPluggableServices() { List services = new ArrayList(); - Set>> entries = _pluggableServicesMap.entrySet(); + Set>> entries = _pluginsMap.entrySet(); for (Map.Entry> entry : entries) { ComponentInfo info = entry.getValue(); if (info.instance == null) { @@ -680,11 +722,11 @@ public class ComponentLocator implements ComponentLocatorMBean { } return services; } - + public static T inject(Class clazz) { return (T)createInstance(clazz, true, false); } - + public T createInstance(Class clazz) { Class impl = (Class)_factories.get(clazz); if (impl == null) { @@ -692,11 +734,11 @@ public class ComponentLocator implements ComponentLocatorMBean { } return inject(impl); } - + public static T inject(Class clazz, Object... 
args) { return (T)createInstance(clazz, true, false, args); } - + @Override public Map> getAdapterNames() { HashMap> result = new HashMap>(); @@ -749,7 +791,7 @@ public class ComponentLocator implements ComponentLocatorMBean { } return new Adapters(key, new ArrayList>()); } - + protected void resetInterceptors(InterceptorLibrary library) { library.addInterceptors(s_interceptors); if (s_interceptors.size() > 0) { @@ -781,7 +823,7 @@ public class ComponentLocator implements ComponentLocatorMBean { s_once = true; } } - + ComponentLocator locator; synchronized (s_locators) { locator = s_locators.get(server); @@ -822,7 +864,7 @@ public class ComponentLocator implements ComponentLocatorMBean { } catch (IOException e) { s_logger.debug("environment.properties could not be loaded:" + e.toString()); } - + if (configFile == null || PropertiesUtil.findConfigFile(configFile) == null) { configFile = "components.xml"; if (PropertiesUtil.findConfigFile(configFile) == null){ @@ -843,31 +885,31 @@ public class ComponentLocator implements ComponentLocatorMBean { List keys = new ArrayList(); T instance; boolean singleton = true; - + protected ComponentInfo() { } - + public List getKeys() { return keys; } - + public String getName() { return name; } - + public ComponentInfo(String name, Class clazz) { this(name, clazz, new ArrayList>(0)); } - + public ComponentInfo(String name, Class clazz, T instance) { this(name, clazz); this.instance = instance; } - + public ComponentInfo(String name, Class clazz, List> params) { this(name, clazz, params, true); } - + public ComponentInfo(String name, Class clazz, List> params, boolean singleton) { this.name = name; this.clazz = clazz; @@ -877,10 +919,10 @@ public class ComponentLocator implements ComponentLocatorMBean { } fillInfo(); } - + protected void fillInfo() { String clazzName = clazz.getName(); - + Local local = clazz.getAnnotation(Local.class); if (local == null) { throw new CloudRuntimeException("Unable to find Local annotation for class " + clazzName); @@ -900,7 +942,7 @@ public class ComponentLocator implements ComponentLocatorMBean { } } } - + public void addParameter(String name, String value) { params.put(name, value); } @@ -964,13 +1006,13 @@ public class ComponentLocator implements ComponentLocatorMBean { if (singleton != null) { info.singleton = Boolean.parseBoolean(singleton); } - + info.fillInfo(); } - + @Override public void startElement(String namespaceURI, String localName, String qName, Attributes atts) - throws SAXException { + throws SAXException { if (qName.equals("interceptor") && s_interceptors.size() == 0) { synchronized(s_interceptors){ if (s_interceptors.size() == 0) { @@ -1001,7 +1043,7 @@ public class ComponentLocator implements ComponentLocatorMBean { throw new CloudRuntimeException("Unable to find " + implementationClass, e); } } - + library = getAttribute(atts, "library"); } } else if (qName.equals("adapters")) { @@ -1041,7 +1083,7 @@ public class ComponentLocator implements ComponentLocatorMBean { checkers.put(info.name, info); s_logger.info("Adding system integrity checker: " + info.name); currentInfo = info; - } else if (qName.equals("pluggableservice")) { + } else if (qName.equals("pluggableservice") || qName.equals("plugin")) { ComponentInfo info = new ComponentInfo(); fillInfo(atts, PluggableService.class, info); s_logger.info("Adding PluggableService: " + info.name); @@ -1096,17 +1138,17 @@ public class ComponentLocator implements ComponentLocatorMBean { } } } - + protected static class InjectInfo { public Factory factory; public 
Enhancer enhancer; - + public InjectInfo(Enhancer enhancer, Factory factory) { this.factory = factory; this.enhancer = enhancer; } } - + protected static class CleanupThread extends Thread { @Override public void run() { @@ -1133,7 +1175,7 @@ public class ComponentLocator implements ComponentLocatorMBean { } } } - + for (ComponentLocator locator : s_locators.values()) { Iterator> itManagers = locator._managerMap.values().iterator(); while (itManagers.hasNext()) { @@ -1154,7 +1196,7 @@ public class ComponentLocator implements ComponentLocatorMBean { } } } - + static class Singleton { public enum State { Instantiated, @@ -1163,16 +1205,16 @@ public class ComponentLocator implements ComponentLocatorMBean { Started, Stopped } - + public Object singleton; public State state; - + public Singleton(Object singleton) { this.singleton = singleton; this.state = State.Instantiated; } } - + protected class InterceptorDispatcher implements MethodInterceptor { @Override @@ -1200,7 +1242,7 @@ public class ComponentLocator implements ComponentLocatorMBean { } } } - + protected static class InterceptorFilter implements CallbackFilter { @Override public int accept(Method method) { @@ -1215,7 +1257,7 @@ public class ComponentLocator implements ComponentLocatorMBean { } } } - + return index; } } diff --git a/utils/src/com/cloud/utils/component/PluggableService.java b/utils/src/com/cloud/utils/component/PluggableService.java index 8094a4ba1d7..9c946284eea 100644 --- a/utils/src/com/cloud/utils/component/PluggableService.java +++ b/utils/src/com/cloud/utils/component/PluggableService.java @@ -17,6 +17,7 @@ package com.cloud.utils.component; + /** * This interface defines methods for pluggable code within the Cloud Stack. */ @@ -26,5 +27,5 @@ public interface PluggableService { * The config file name that lists API commands supported by this pluggable service */ String getPropertiesFile(); - + } diff --git a/utils/src/com/cloud/utils/component/Plugin.java b/utils/src/com/cloud/utils/component/Plugin.java new file mode 100755 index 00000000000..ffd704c7558 --- /dev/null +++ b/utils/src/com/cloud/utils/component/Plugin.java @@ -0,0 +1,64 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.utils.component; + +import java.util.List; + +import com.cloud.utils.Pair; + + +/** + * CloudStack uses Adapters to implement different capabilities. + * There are different Adapters such as NetworkGuru, NetworkElement, + * HypervisorGuru, DeploymentPlanner, etc. However, Adapters only + * defines what CloudStack needs from the implementation. What about + * what the Adapter itself needs, such as configurations and administrative + * operations, and what if one implementation can + * implement two different Adapters? 
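To make the scenario just raised concrete, here is a hedged sketch of a hypothetical plugin in which one class backs two different Adapters. The Acme* names are invented, the two stub interfaces merely stand in for the real NetworkGuru and NetworkElement adapter interfaces mentioned above, and the generic signature of getAdapterImplementations() is an assumption about the Plugin interface declared just below:

import java.util.ArrayList;
import java.util.List;

import com.cloud.utils.Pair;

interface NetworkGuru { }        // stand-in for the real adapter interface
interface NetworkElement { }     // stand-in for the real adapter interface

// One implementation class covering two Adapters.
class AcmeNetworkGuru implements NetworkGuru, NetworkElement { }

public class AcmeNetworkPlugin implements Plugin {

    @Override
    public String getPropertiesFile() {
        // Properties file listing the REST API commands this plugin adds.
        return "acme-network-plugin.properties";
    }

    @Override
    public ComponentLibrary getComponentLibrary() {
        // Null is allowed when the plugin does not rely on ComponentLocator
        // for injection of its own managers and DAOs.
        return null;
    }

    @Override
    public List<Pair<Class<?>, Class<?>>> getAdapterImplementations() {
        List<Pair<Class<?>, Class<?>>> adapters = new ArrayList<Pair<Class<?>, Class<?>>>();
        // The first element of each Pair is the Adapter interface, the second
        // is the implementation; the same class may appear under both.
        adapters.add(new Pair<Class<?>, Class<?>>(NetworkGuru.class, AcmeNetworkGuru.class));
        adapters.add(new Pair<Class<?>, Class<?>>(NetworkElement.class, AcmeNetworkGuru.class));
        return adapters;
    }
}

The sketch assumes it sits in the same package as Plugin, ComponentLibrary and PluggableService (com.cloud.utils.component), which is why only Pair and the collection classes are imported.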
+ * + * Plugin is a CloudStack container for Adapters. It rolls the following + * capabilities into one package for CloudStack to load at runtime. + * - REST API commands supported by the Plugin. + * - Components needed by the Plugin. + * - Adapters implemented by the Plugin. + * - Database operations. + * + */ +public interface Plugin extends PluggableService { + + /** + * Retrieves the component libraries needed by this Plugin. + * ComponentLocator picks up these components and adds them to the startup + * and shutdown processes of CloudStack. This is only needed if the + * Plugin uses ComponentLocator to inject what it needs. If the + * Plugin uses other mechanisms, then it can return null here. + * + * @return a component library that contains the components this Plugin + * contains and needs. + */ + ComponentLibrary getComponentLibrary(); + + /** + * Retrieves the list of Adapters and the interface they implement. It + * can be an empty list if the Plugin does not implement any. + * + * @return list of pairs where the first is the interface and the second + * is the adapter. + */ + List, Class>> getAdapterImplementations(); +} diff --git a/utils/src/com/cloud/utils/encoding/Base64.java b/utils/src/com/cloud/utils/encoding/Base64.java deleted file mode 100755 index 0179714a0fc..00000000000 --- a/utils/src/com/cloud/utils/encoding/Base64.java +++ /dev/null @@ -1,1739 +0,0 @@ -package com.cloud.utils.encoding; - -/** - *

- * Encodes and decodes to and from Base64 notation.
- *
- * Homepage: http://iharder.net/base64.
- *
- * Change Log:
- *
- * v2.2.1 - Fixed bug using URL_SAFE and ORDERED encodings. Fixed bug
- *          when using very small files (~< 40 bytes).
- *
- * v2.2   - Added some helper methods for encoding/decoding directly from
- *          one file to the next. Also added a main() method to support command line
- *          encoding/decoding from one file to the next. Also added these Base64 dialects:
- *          1. The default is RFC3548 format.
- *          2. Calling Base64.setFormat(Base64.BASE64_FORMAT.URLSAFE_FORMAT) generates
- *             http://www.faqs.org/rfcs/rfc3548.html
- *          3. Calling Base64.setFormat(Base64.BASE64_FORMAT.ORDERED_FORMAT) generates
- *             URL and file name friendly format that preserves lexical ordering as described
- *             in http://www.faqs.org/qa/rfcc-1940.html
- *          Special thanks to Jim Kellerman at http://www.powerset.com/
- *          for contributing the new Base64 dialects.
- *
- * v2.1   - Cleaned up javadoc comments and unused variables and methods. Added
- *          some convenience methods for reading and writing to and from files.
- *
- * v2.0.2 - Now specifies UTF-8 encoding in places where the code fails on systems
- *          with other encodings (like EBCDIC).
- *
- * v2.0.1 - Fixed an error when decoding a single byte, that is, when the
- *          encoded data was a single byte.
- *
- * v2.0   - I got rid of methods that used booleans to set options.
- *          Now everything is more consolidated and cleaner. The code now detects
- *          when data that's being decoded is gzip-compressed and will decompress it
- *          automatically. Generally things are cleaner. You'll probably have to
- *          change some method calls that you were making to support the new
- *          options format (ints that you "OR" together).
- *
- * v1.5.1 - Fixed bug when decompressing and decoding to a
- *          byte[] using decode( String s, boolean gzipCompressed ).
- *          Added the ability to "suspend" encoding in the Output Stream so
- *          you can turn on and off the encoding if you need to embed base64
- *          data in an otherwise "normal" stream (like an XML file).
- *
- * v1.5   - Output stream pases on flush() command but doesn't do anything itself.
- *          This helps when using GZIP streams.
- *          Added the ability to GZip-compress objects before encoding them.
- *
- * v1.4   - Added helper methods to read/write files.
- *
- * v1.3.6 - Fixed OutputStream.flush() so that 'position' is reset.
- *
- * v1.3.5 - Added flag to turn on and off line breaks. Fixed bug in input stream
- *          where last buffer being read, if not completely full, was not returned.
- *
- * v1.3.4 - Fixed when "improperly padded stream" error was thrown at the wrong time.
- *
- * v1.3.3 - Fixed I/O streams which were totally messed up.
- *
- * I am placing this code in the Public Domain. Do with it as you will.
- * This software comes with no guarantees or warranties but with
- * plenty of well-wishing instead!
- * Please visit http://iharder.net/base64
- * periodically to check for updates or to contribute improvements.
- * - * @author Robert Harder - * @author rob@iharder.net - * @version 2.2.1 - */ -public class Base64 -{ - -/* ******** P U B L I C F I E L D S ******** */ - - - /** No options specified. Value is zero. */ - public final static int NO_OPTIONS = 0; - - /** Specify encoding. */ - public final static int ENCODE = 1; - - - /** Specify decoding. */ - public final static int DECODE = 0; - - - /** Specify that data should be gzip-compressed. */ - public final static int GZIP = 2; - - - /** Don't break lines when encoding (violates strict Base64 specification) */ - public final static int DONT_BREAK_LINES = 8; - - /** - * Encode using Base64-like encoding that is URL- and Filename-safe as described - * in Section 4 of RFC3548: - * http://www.faqs.org/rfcs/rfc3548.html. - * It is important to note that data encoded this way is not officially valid Base64, - * or at the very least should not be called Base64 without also specifying that is - * was encoded using the URL- and Filename-safe dialect. - */ - public final static int URL_SAFE = 16; - - - /** - * Encode using the special "ordered" dialect of Base64 described here: - * http://www.faqs.org/qa/rfcc-1940.html. - */ - public final static int ORDERED = 32; - - -/* ******** P R I V A T E F I E L D S ******** */ - - - /** Maximum line length (76) of Base64 output. */ - private final static int MAX_LINE_LENGTH = 76; - - - /** The equals sign (=) as a byte. */ - private final static byte EQUALS_SIGN = (byte)'='; - - - /** The new line character (\n) as a byte. */ - private final static byte NEW_LINE = (byte)'\n'; - - - /** Preferred encoding. */ - private final static String PREFERRED_ENCODING = "UTF-8"; - - - // I think I end up not using the BAD_ENCODING indicator. - //private final static byte BAD_ENCODING = -9; // Indicates error in encoding - private final static byte WHITE_SPACE_ENC = -5; // Indicates white space in encoding - private final static byte EQUALS_SIGN_ENC = -1; // Indicates equals sign in encoding - - -/* ******** S T A N D A R D B A S E 6 4 A L P H A B E T ******** */ - - /** The 64 valid Base64 values. */ - //private final static byte[] ALPHABET; - /* Host platform me be something funny like EBCDIC, so we hardcode these values. */ - private final static byte[] _STANDARD_ALPHABET = - { - (byte)'A', (byte)'B', (byte)'C', (byte)'D', (byte)'E', (byte)'F', (byte)'G', - (byte)'H', (byte)'I', (byte)'J', (byte)'K', (byte)'L', (byte)'M', (byte)'N', - (byte)'O', (byte)'P', (byte)'Q', (byte)'R', (byte)'S', (byte)'T', (byte)'U', - (byte)'V', (byte)'W', (byte)'X', (byte)'Y', (byte)'Z', - (byte)'a', (byte)'b', (byte)'c', (byte)'d', (byte)'e', (byte)'f', (byte)'g', - (byte)'h', (byte)'i', (byte)'j', (byte)'k', (byte)'l', (byte)'m', (byte)'n', - (byte)'o', (byte)'p', (byte)'q', (byte)'r', (byte)'s', (byte)'t', (byte)'u', - (byte)'v', (byte)'w', (byte)'x', (byte)'y', (byte)'z', - (byte)'0', (byte)'1', (byte)'2', (byte)'3', (byte)'4', (byte)'5', - (byte)'6', (byte)'7', (byte)'8', (byte)'9', (byte)'+', (byte)'/' - }; - - - /** - * Translates a Base64 value to either its 6-bit reconstruction value - * or a negative number indicating some other meaning. 
- **/ - private final static byte[] _STANDARD_DECODABET = - { - -9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 0 - 8 - -5,-5, // Whitespace: Tab and Linefeed - -9,-9, // Decimal 11 - 12 - -5, // Whitespace: Carriage Return - -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 14 - 26 - -9,-9,-9,-9,-9, // Decimal 27 - 31 - -5, // Whitespace: Space - -9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 33 - 42 - 62, // Plus sign at decimal 43 - -9,-9,-9, // Decimal 44 - 46 - 63, // Slash at decimal 47 - 52,53,54,55,56,57,58,59,60,61, // Numbers zero through nine - -9,-9,-9, // Decimal 58 - 60 - -1, // Equals sign at decimal 61 - -9,-9,-9, // Decimal 62 - 64 - 0,1,2,3,4,5,6,7,8,9,10,11,12,13, // Letters 'A' through 'N' - 14,15,16,17,18,19,20,21,22,23,24,25, // Letters 'O' through 'Z' - -9,-9,-9,-9,-9,-9, // Decimal 91 - 96 - 26,27,28,29,30,31,32,33,34,35,36,37,38, // Letters 'a' through 'm' - 39,40,41,42,43,44,45,46,47,48,49,50,51, // Letters 'n' through 'z' - -9,-9,-9,-9 // Decimal 123 - 126 - /*,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 127 - 139 - -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 140 - 152 - -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 153 - 165 - -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 166 - 178 - -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 179 - 191 - -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 192 - 204 - -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 205 - 217 - -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 218 - 230 - -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 231 - 243 - -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9 // Decimal 244 - 255 */ - }; - - -/* ******** U R L S A F E B A S E 6 4 A L P H A B E T ******** */ - - /** - * Used in the URL- and Filename-safe dialect described in Section 4 of RFC3548: - * http://www.faqs.org/rfcs/rfc3548.html. - * Notice that the last two bytes become "hyphen" and "underscore" instead of "plus" and "slash." - */ - private final static byte[] _URL_SAFE_ALPHABET = - { - (byte)'A', (byte)'B', (byte)'C', (byte)'D', (byte)'E', (byte)'F', (byte)'G', - (byte)'H', (byte)'I', (byte)'J', (byte)'K', (byte)'L', (byte)'M', (byte)'N', - (byte)'O', (byte)'P', (byte)'Q', (byte)'R', (byte)'S', (byte)'T', (byte)'U', - (byte)'V', (byte)'W', (byte)'X', (byte)'Y', (byte)'Z', - (byte)'a', (byte)'b', (byte)'c', (byte)'d', (byte)'e', (byte)'f', (byte)'g', - (byte)'h', (byte)'i', (byte)'j', (byte)'k', (byte)'l', (byte)'m', (byte)'n', - (byte)'o', (byte)'p', (byte)'q', (byte)'r', (byte)'s', (byte)'t', (byte)'u', - (byte)'v', (byte)'w', (byte)'x', (byte)'y', (byte)'z', - (byte)'0', (byte)'1', (byte)'2', (byte)'3', (byte)'4', (byte)'5', - (byte)'6', (byte)'7', (byte)'8', (byte)'9', (byte)'-', (byte)'_' - }; - - /** - * Used in decoding URL- and Filename-safe dialects of Base64. 
- */ - private final static byte[] _URL_SAFE_DECODABET = - { - -9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 0 - 8 - -5,-5, // Whitespace: Tab and Linefeed - -9,-9, // Decimal 11 - 12 - -5, // Whitespace: Carriage Return - -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 14 - 26 - -9,-9,-9,-9,-9, // Decimal 27 - 31 - -5, // Whitespace: Space - -9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 33 - 42 - -9, // Plus sign at decimal 43 - -9, // Decimal 44 - 62, // Minus sign at decimal 45 - -9, // Decimal 46 - -9, // Slash at decimal 47 - 52,53,54,55,56,57,58,59,60,61, // Numbers zero through nine - -9,-9,-9, // Decimal 58 - 60 - -1, // Equals sign at decimal 61 - -9,-9,-9, // Decimal 62 - 64 - 0,1,2,3,4,5,6,7,8,9,10,11,12,13, // Letters 'A' through 'N' - 14,15,16,17,18,19,20,21,22,23,24,25, // Letters 'O' through 'Z' - -9,-9,-9,-9, // Decimal 91 - 94 - 63, // Underscore at decimal 95 - -9, // Decimal 96 - 26,27,28,29,30,31,32,33,34,35,36,37,38, // Letters 'a' through 'm' - 39,40,41,42,43,44,45,46,47,48,49,50,51, // Letters 'n' through 'z' - -9,-9,-9,-9 // Decimal 123 - 126 - /*,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 127 - 139 - -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 140 - 152 - -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 153 - 165 - -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 166 - 178 - -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 179 - 191 - -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 192 - 204 - -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 205 - 217 - -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 218 - 230 - -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 231 - 243 - -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9 // Decimal 244 - 255 */ - }; - - - -/* ******** O R D E R E D B A S E 6 4 A L P H A B E T ******** */ - - /** - * I don't get the point of this technique, but it is described here: - * http://www.faqs.org/qa/rfcc-1940.html. - */ - private final static byte[] _ORDERED_ALPHABET = - { - (byte)'-', - (byte)'0', (byte)'1', (byte)'2', (byte)'3', (byte)'4', - (byte)'5', (byte)'6', (byte)'7', (byte)'8', (byte)'9', - (byte)'A', (byte)'B', (byte)'C', (byte)'D', (byte)'E', (byte)'F', (byte)'G', - (byte)'H', (byte)'I', (byte)'J', (byte)'K', (byte)'L', (byte)'M', (byte)'N', - (byte)'O', (byte)'P', (byte)'Q', (byte)'R', (byte)'S', (byte)'T', (byte)'U', - (byte)'V', (byte)'W', (byte)'X', (byte)'Y', (byte)'Z', - (byte)'_', - (byte)'a', (byte)'b', (byte)'c', (byte)'d', (byte)'e', (byte)'f', (byte)'g', - (byte)'h', (byte)'i', (byte)'j', (byte)'k', (byte)'l', (byte)'m', (byte)'n', - (byte)'o', (byte)'p', (byte)'q', (byte)'r', (byte)'s', (byte)'t', (byte)'u', - (byte)'v', (byte)'w', (byte)'x', (byte)'y', (byte)'z' - }; - - /** - * Used in decoding the "ordered" dialect of Base64. 
- */ - private final static byte[] _ORDERED_DECODABET = - { - -9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 0 - 8 - -5,-5, // Whitespace: Tab and Linefeed - -9,-9, // Decimal 11 - 12 - -5, // Whitespace: Carriage Return - -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 14 - 26 - -9,-9,-9,-9,-9, // Decimal 27 - 31 - -5, // Whitespace: Space - -9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 33 - 42 - -9, // Plus sign at decimal 43 - -9, // Decimal 44 - 0, // Minus sign at decimal 45 - -9, // Decimal 46 - -9, // Slash at decimal 47 - 1,2,3,4,5,6,7,8,9,10, // Numbers zero through nine - -9,-9,-9, // Decimal 58 - 60 - -1, // Equals sign at decimal 61 - -9,-9,-9, // Decimal 62 - 64 - 11,12,13,14,15,16,17,18,19,20,21,22,23, // Letters 'A' through 'M' - 24,25,26,27,28,29,30,31,32,33,34,35,36, // Letters 'N' through 'Z' - -9,-9,-9,-9, // Decimal 91 - 94 - 37, // Underscore at decimal 95 - -9, // Decimal 96 - 38,39,40,41,42,43,44,45,46,47,48,49,50, // Letters 'a' through 'm' - 51,52,53,54,55,56,57,58,59,60,61,62,63, // Letters 'n' through 'z' - -9,-9,-9,-9 // Decimal 123 - 126 - /*,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 127 - 139 - -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 140 - 152 - -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 153 - 165 - -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 166 - 178 - -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 179 - 191 - -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 192 - 204 - -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 205 - 217 - -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 218 - 230 - -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 231 - 243 - -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9 // Decimal 244 - 255 */ - }; - - -/* ******** D E T E R M I N E W H I C H A L H A B E T ******** */ - - - /** - * Returns one of the _SOMETHING_ALPHABET byte arrays depending on - * the options specified. - * It's possible, though silly, to specify ORDERED and URLSAFE - * in which case one of them will be picked, though there is - * no guarantee as to which one will be picked. - * @param options Specify the type of alphabet desired. - * @return The requested alphabet. - */ - private static byte[] getAlphabet( int options ) - { - if( (options & URL_SAFE) == URL_SAFE ) return _URL_SAFE_ALPHABET; - else if( (options & ORDERED) == ORDERED ) return _ORDERED_ALPHABET; - else return _STANDARD_ALPHABET; - - } - - /** - * Returns one of the _SOMETHING_DECODABET byte arrays depending on - * the options specified. - * It's possible, though silly, to specify ORDERED and URL_SAFE - * in which case one of them will be picked, though there is - * no guarantee as to which one will be picked. - * @param options Specify the type of alphabet desired. - * @return The requested alphabet. - */ - private static byte[] getDecodabet( int options ) - { - if( (options & URL_SAFE) == URL_SAFE ) return _URL_SAFE_DECODABET; - else if( (options & ORDERED) == ORDERED ) return _ORDERED_DECODABET; - else return _STANDARD_DECODABET; - - } - - /** Defeats instantiation. */ - private Base64(){} - -/* ******** E N C O D I N G M E T H O D S ******** */ - - /** - * Encodes up to the first three bytes of array threeBytes - * and returns a four-byte array in Base64 notation. - * The actual number of significant bytes in your array is - * given by numSigBytes. - * The array threeBytes needs only be as big as - * numSigBytes. - * Code can reuse a byte array by passing a four-byte array as b4. 
- * - * @param b4 A reusable byte array to reduce array instantiation - * @param threeBytes the array to convert - * @param numSigBytes the number of significant bytes in your array - * @param options the alphabet options to use. - * @return four byte array in Base64 notation. - * @since 1.5.1 - */ - private static byte[] encode3to4( byte[] b4, byte[] threeBytes, int numSigBytes, int options ) - { - encode3to4( threeBytes, 0, numSigBytes, b4, 0, options ); - return b4; - } - - /** - *

Encodes up to three bytes of the array source and writes the resulting four Base64 bytes
- * to destination. The source and destination arrays can be manipulated anywhere along
- * their length by specifying srcOffset and destOffset. This method does not check to
- * make sure your arrays are large enough to accommodate srcOffset + 3 for the source
- * array or destOffset + 4 for the destination array. The actual number of significant
- * bytes in your array is given by numSigBytes.
- *
- * This is the lowest level of the encoding methods with all possible parameters.
- * - * @param source the array to convert - * @param srcOffset the index where conversion begins - * @param numSigBytes the number of significant bytes in your array - * @param destination the array to hold the conversion - * @param destOffset the index where output will be put - * @param options the alphabet options to use - * @return the destination array - * @since 1.3 - */ - private static byte[] encode3to4( - byte[] source, int srcOffset, int numSigBytes, - byte[] destination, int destOffset, int options ) - { - byte[] ALPHABET = getAlphabet( options ); - - // 1 2 3 - // 01234567890123456789012345678901 Bit position - // --------000000001111111122222222 Array position from threeBytes - // --------| || || || | Six bit groups to index ALPHABET - // >>18 >>12 >> 6 >> 0 Right shift necessary - // 0x3f 0x3f 0x3f Additional AND - - // Create buffer with zero-padding if there are only one or two - // significant bytes passed in the array. - // We have to shift left 24 in order to flush out the 1's that appear - // when Java treats a value as negative that is cast from a byte to an int. - int inBuff = ( numSigBytes > 0 ? ((source[ srcOffset ] << 24) >>> 8) : 0 ) - | ( numSigBytes > 1 ? ((source[ srcOffset + 1 ] << 24) >>> 16) : 0 ) - | ( numSigBytes > 2 ? ((source[ srcOffset + 2 ] << 24) >>> 24) : 0 ); - - switch( numSigBytes ) - { - case 3: - destination[ destOffset ] = ALPHABET[ (inBuff >>> 18) ]; - destination[ destOffset + 1 ] = ALPHABET[ (inBuff >>> 12) & 0x3f ]; - destination[ destOffset + 2 ] = ALPHABET[ (inBuff >>> 6) & 0x3f ]; - destination[ destOffset + 3 ] = ALPHABET[ (inBuff ) & 0x3f ]; - return destination; - - case 2: - destination[ destOffset ] = ALPHABET[ (inBuff >>> 18) ]; - destination[ destOffset + 1 ] = ALPHABET[ (inBuff >>> 12) & 0x3f ]; - destination[ destOffset + 2 ] = ALPHABET[ (inBuff >>> 6) & 0x3f ]; - destination[ destOffset + 3 ] = EQUALS_SIGN; - return destination; - - case 1: - destination[ destOffset ] = ALPHABET[ (inBuff >>> 18) ]; - destination[ destOffset + 1 ] = ALPHABET[ (inBuff >>> 12) & 0x3f ]; - destination[ destOffset + 2 ] = EQUALS_SIGN; - destination[ destOffset + 3 ] = EQUALS_SIGN; - return destination; - - default: - return destination; - } // end switch - } - - /** - * Serializes an object and returns the Base64-encoded - * version of that serialized object. If the object - * cannot be serialized or there is another error, - * the method will return null. - * The object is not GZip-compressed before being encoded. - * - * @param serializableObject The object to encode - * @return The Base64-encoded object - * @since 1.4 - */ - public static String encodeObject( java.io.Serializable serializableObject ) - { - return encodeObject( serializableObject, NO_OPTIONS ); - } // end encodeObject - - - - /** - * Serializes an object and returns the Base64-encoded - * version of that serialized object. If the object - * cannot be serialized or there is another error, - * the method will return null. - *
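Editor's note: the bit diagram in encode3to4 above is easier to follow with concrete values. A self-contained sketch of the same packing, using the standard RFC 4648 alphabet in place of the class's internal table (class and variable names here are illustrative only):

    public class Encode3To4Demo {
        private static final String ALPHABET =
            "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";

        public static void main(String[] args) {
            byte[] src = { 'C', 'a', 't' };                // 0x43 0x61 0x74

            // Shift each byte left 24 and back right (unsigned) to clear sign
            // extension, then OR the three bytes into one 24-bit buffer,
            // exactly as encode3to4 does above.
            int inBuff = ((src[0] << 24) >>> 8)
                       | ((src[1] << 24) >>> 16)
                       | ((src[2] << 24) >>> 24);          // 0x436174

            // Pull out four 6-bit groups, high bits first.
            char[] out = {
                ALPHABET.charAt( inBuff >>> 18        ),
                ALPHABET.charAt((inBuff >>> 12) & 0x3f),
                ALPHABET.charAt((inBuff >>>  6) & 0x3f),
                ALPHABET.charAt( inBuff         & 0x3f)
            };
            System.out.println(new String(out));           // "Q2F0"
        }
    }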

- * Valid options:
-     *   GZIP: gzip-compresses object before encoding it.
-     *   DONT_BREAK_LINES: don't break lines at 76 characters
-     *     Note: Technically, this makes your encoding non-compliant.
- *
- * Example: encodeObject( myObj, Base64.GZIP ) or
- * Example: encodeObject( myObj, Base64.GZIP | Base64.DONT_BREAK_LINES ) - * - * @param serializableObject The object to encode - * @param options Specified options - * @return The Base64-encoded object - * @see Base64#GZIP - * @see Base64#DONT_BREAK_LINES - * @since 2.0 - */ - public static String encodeObject( java.io.Serializable serializableObject, int options ) - { - // Streams - java.io.ByteArrayOutputStream baos = null; - java.io.OutputStream b64os = null; - java.io.ObjectOutputStream oos = null; - java.util.zip.GZIPOutputStream gzos = null; - - // Isolate options - int gzip = (options & GZIP); - - try - { - // ObjectOutputStream -> (GZIP) -> Base64 -> ByteArrayOutputStream - baos = new java.io.ByteArrayOutputStream(); - b64os = new Base64.OutputStream( baos, ENCODE | options ); - - // GZip? - if( gzip == GZIP ) - { - gzos = new java.util.zip.GZIPOutputStream( b64os ); - oos = new java.io.ObjectOutputStream( gzos ); - } // end if: gzip - else - oos = new java.io.ObjectOutputStream( b64os ); - - oos.writeObject( serializableObject ); - } // end try - catch( java.io.IOException e ) - { - e.printStackTrace(); - return null; - } // end catch - finally - { - try{ oos.close(); } catch( Exception e ){ /* empty */ } - try{ gzos.close(); } catch( Exception e ){ /* empty */ } - try{ b64os.close(); } catch( Exception e ){ /* empty */ } - try{ baos.close(); } catch( Exception e ){ /* empty */ } - } // end finally - - // Return value according to relevant encoding. - try - { - return new String( baos.toByteArray(), PREFERRED_ENCODING ); - } // end try - catch (java.io.UnsupportedEncodingException uue) - { - return new String( baos.toByteArray() ); - } // end catch - - } // end encode - - - - /** - * Encodes a byte array into Base64 notation. - * Does not GZip-compress data. - * - * @param source The data to convert - * @return the string of encoded bytes. - * @since 1.4 - */ - public static String encodeBytes( byte[] source ) - { - return encodeBytes( source, 0, source.length, NO_OPTIONS ); - } // end encodeBytes - - - - /** - * Encodes a byte array into Base64 notation. - *
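Editor's note: the option flags listed just below are plain bit masks, so callers OR them together and the code tests them with a mask-and-compare. A small sketch, assuming the class as it existed before this patch is still on the classpath:

    public class OptionsDemo {
        public static void main(String[] args) {
            byte[] data = "hello cloudstack".getBytes();

            // Combine independent flags with bitwise OR.
            String s = Base64.encodeBytes(data, Base64.GZIP | Base64.DONT_BREAK_LINES);
            System.out.println(s);

            // Each flag is tested the way encodeBytes()/getAlphabet() do internally.
            int options = Base64.GZIP | Base64.DONT_BREAK_LINES;
            boolean gzip       = (options & Base64.GZIP) == Base64.GZIP;    // true
            boolean breakLines = (options & Base64.DONT_BREAK_LINES) == 0;  // false
            System.out.println(gzip + " " + breakLines);
        }
    }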

- * Valid options:
-     *   GZIP: gzip-compresses object before encoding it.
-     *   DONT_BREAK_LINES: don't break lines at 76 characters
-     *     Note: Technically, this makes your encoding non-compliant.
- *
- * Example: encodeBytes( myData, Base64.GZIP ) or
- * Example: encodeBytes( myData, Base64.GZIP | Base64.DONT_BREAK_LINES ) - * - * - * @param source The data to convert - * @param options Specified options - * @return the string of encoded bytes - * @see Base64#GZIP - * @see Base64#DONT_BREAK_LINES - * @since 2.0 - */ - public static String encodeBytes( byte[] source, int options ) - { - return encodeBytes( source, 0, source.length, options ); - } // end encodeBytes - - - /** - * Encodes a byte array into Base64 notation. - * Does not GZip-compress data. - * - * @param source The data to convert - * @param off Offset in array where conversion should begin - * @param len Length of data to convert - * @return the string of encoded bytes - * @since 1.4 - */ - public static String encodeBytes( byte[] source, int off, int len ) - { - return encodeBytes( source, off, len, NO_OPTIONS ); - } // end encodeBytes - - - - /** - * Encodes a byte array into Base64 notation. - *

- * Valid options:
-     *   GZIP: gzip-compresses object before encoding it.
-     *   DONT_BREAK_LINES: don't break lines at 76 characters
-     *     Note: Technically, this makes your encoding non-compliant.
- *
- * Example: encodeBytes( myData, Base64.GZIP ) or
- * Example: encodeBytes( myData, Base64.GZIP | Base64.DONT_BREAK_LINES ) - * - * - * @param source The data to convert - * @param off Offset in array where conversion should begin - * @param len Length of data to convert - * @param options alphabet type is pulled from this (standard, url-safe, ordered) - * @return the string of encoded bytes - * @see Base64#GZIP - * @see Base64#DONT_BREAK_LINES - * @since 2.0 - */ - public static String encodeBytes( byte[] source, int off, int len, int options ) - { - // Isolate options - int dontBreakLines = ( options & DONT_BREAK_LINES ); - int gzip = ( options & GZIP ); - - // Compress? - if( gzip == GZIP ) - { - java.io.ByteArrayOutputStream baos = null; - java.util.zip.GZIPOutputStream gzos = null; - Base64.OutputStream b64os = null; - - - try - { - // GZip -> Base64 -> ByteArray - baos = new java.io.ByteArrayOutputStream(); - b64os = new Base64.OutputStream( baos, ENCODE | options ); - gzos = new java.util.zip.GZIPOutputStream( b64os ); - - gzos.write( source, off, len ); - gzos.close(); - } // end try - catch( java.io.IOException e ) - { - e.printStackTrace(); - return null; - } // end catch - finally - { - try{ gzos.close(); } catch( Exception e ){ /* empty */ } - try{ b64os.close(); } catch( Exception e ){ /* empty */ } - try{ baos.close(); } catch( Exception e ){ /* empty */ } - } // end finally - - // Return value according to relevant encoding. - try - { - return new String( baos.toByteArray(), PREFERRED_ENCODING ); - } // end try - catch (java.io.UnsupportedEncodingException uue) - { - return new String( baos.toByteArray() ); - } // end catch - } // end if: compress - - // Else, don't compress. Better not to use streams at all then. - else - { - // Convert option to boolean in way that code likes it. - boolean breakLines = dontBreakLines == 0; - - int len43 = len * 4 / 3; - byte[] outBuff = new byte[ ( len43 ) // Main 4:3 - + ( (len % 3) > 0 ? 4 : 0 ) // Account for padding - + (breakLines ? ( len43 / MAX_LINE_LENGTH ) : 0) ]; // New lines - int d = 0; - int e = 0; - int len2 = len - 2; - int lineLength = 0; - for( ; d < len2; d+=3, e+=4 ) - { - encode3to4( source, d+off, 3, outBuff, e, options ); - - lineLength += 4; - if( breakLines && lineLength == MAX_LINE_LENGTH ) - { - outBuff[e+4] = NEW_LINE; - e++; - lineLength = 0; - } // end if: end of line - } // en dfor: each piece of array - - if( d < len ) - { - encode3to4( source, d+off, len - d, outBuff, e, options ); - e += 4; - } // end if: some padding needed - - - // Return value according to relevant encoding. - try - { - return new String( outBuff, 0, e, PREFERRED_ENCODING ); - } // end try - catch (java.io.UnsupportedEncodingException uue) - { - return new String( outBuff, 0, e ); - } // end catch - - } // end else: don't compress - - } // end encodeBytes - - - - - -/* ******** D E C O D I N G M E T H O D S ******** */ - - - /** - * Decodes four bytes from array source - * and writes the resulting bytes (up to three of them) - * to destination. - * The source and destination arrays can be manipulated - * anywhere along their length by specifying - * srcOffset and destOffset. - * This method does not check to make sure your arrays - * are large enough to accomodate srcOffset + 4 for - * the source array or destOffset + 3 for - * the destination array. - * This method returns the actual number of bytes that - * were converted from the Base64 encoding. - *

This is the lowest level of the decoding methods with all possible parameters.
- * - * - * @param source the array to convert - * @param srcOffset the index where conversion begins - * @param destination the array to hold the conversion - * @param destOffset the index where output will be put - * @param options alphabet type is pulled from this (standard, url-safe, ordered) - * @return the number of decoded bytes converted - * @since 1.3 - */ - private static int decode4to3( byte[] source, int srcOffset, byte[] destination, int destOffset, int options ) - { - byte[] DECODABET = getDecodabet( options ); - - // Example: Dk== - if( source[ srcOffset + 2] == EQUALS_SIGN ) - { - // Two ways to do the same thing. Don't know which way I like best. - //int outBuff = ( ( DECODABET[ source[ srcOffset ] ] << 24 ) >>> 6 ) - // | ( ( DECODABET[ source[ srcOffset + 1] ] << 24 ) >>> 12 ); - int outBuff = ( ( DECODABET[ source[ srcOffset ] ] & 0xFF ) << 18 ) - | ( ( DECODABET[ source[ srcOffset + 1] ] & 0xFF ) << 12 ); - - destination[ destOffset ] = (byte)( outBuff >>> 16 ); - return 1; - } - - // Example: DkL= - else if( source[ srcOffset + 3 ] == EQUALS_SIGN ) - { - // Two ways to do the same thing. Don't know which way I like best. - //int outBuff = ( ( DECODABET[ source[ srcOffset ] ] << 24 ) >>> 6 ) - // | ( ( DECODABET[ source[ srcOffset + 1 ] ] << 24 ) >>> 12 ) - // | ( ( DECODABET[ source[ srcOffset + 2 ] ] << 24 ) >>> 18 ); - int outBuff = ( ( DECODABET[ source[ srcOffset ] ] & 0xFF ) << 18 ) - | ( ( DECODABET[ source[ srcOffset + 1 ] ] & 0xFF ) << 12 ) - | ( ( DECODABET[ source[ srcOffset + 2 ] ] & 0xFF ) << 6 ); - - destination[ destOffset ] = (byte)( outBuff >>> 16 ); - destination[ destOffset + 1 ] = (byte)( outBuff >>> 8 ); - return 2; - } - - // Example: DkLE - else - { - try{ - // Two ways to do the same thing. Don't know which way I like best. - //int outBuff = ( ( DECODABET[ source[ srcOffset ] ] << 24 ) >>> 6 ) - // | ( ( DECODABET[ source[ srcOffset + 1 ] ] << 24 ) >>> 12 ) - // | ( ( DECODABET[ source[ srcOffset + 2 ] ] << 24 ) >>> 18 ) - // | ( ( DECODABET[ source[ srcOffset + 3 ] ] << 24 ) >>> 24 ); - int outBuff = ( ( DECODABET[ source[ srcOffset ] ] & 0xFF ) << 18 ) - | ( ( DECODABET[ source[ srcOffset + 1 ] ] & 0xFF ) << 12 ) - | ( ( DECODABET[ source[ srcOffset + 2 ] ] & 0xFF ) << 6) - | ( ( DECODABET[ source[ srcOffset + 3 ] ] & 0xFF ) ); - - - destination[ destOffset ] = (byte)( outBuff >> 16 ); - destination[ destOffset + 1 ] = (byte)( outBuff >> 8 ); - destination[ destOffset + 2 ] = (byte)( outBuff ); - - return 3; - }catch( Exception e){ - System.out.println(""+source[srcOffset]+ ": " + ( DECODABET[ source[ srcOffset ] ] ) ); - System.out.println(""+source[srcOffset+1]+ ": " + ( DECODABET[ source[ srcOffset + 1 ] ] ) ); - System.out.println(""+source[srcOffset+2]+ ": " + ( DECODABET[ source[ srcOffset + 2 ] ] ) ); - System.out.println(""+source[srcOffset+3]+ ": " + ( DECODABET[ source[ srcOffset + 3 ] ] ) ); - return -1; - } // end catch - } - } // end decodeToBytes - - - - - /** - * Very low-level access to decoding ASCII characters in - * the form of a byte array. Does not support automatically - * gunzipping or any other "fancy" features. 
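Editor's note: decoding reverses the packing shown for encode3to4. A standalone sketch mirroring decode4to3's unpadded case, with String.indexOf standing in for the decodabet lookup (illustrative names only):

    public class Decode4To3Demo {
        private static final String ALPHABET =
            "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";

        public static void main(String[] args) {
            String enc = "Q2F0";

            // Rebuild the 24-bit buffer from four 6-bit values, high bits first.
            int outBuff = (ALPHABET.indexOf(enc.charAt(0)) << 18)
                        | (ALPHABET.indexOf(enc.charAt(1)) << 12)
                        | (ALPHABET.indexOf(enc.charAt(2)) <<  6)
                        |  ALPHABET.indexOf(enc.charAt(3));

            // Split it back into three bytes.
            byte[] out = { (byte) (outBuff >> 16), (byte) (outBuff >> 8), (byte) outBuff };
            System.out.println(new String(out));   // "Cat"
        }
    }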
- * - * @param source The Base64 encoded data - * @param off The offset of where to begin decoding - * @param len The length of characters to decode - * @param options alphabet type is pulled from this (standard, url-safe, ordered) - * @return decoded data - * @since 1.3 - */ - public static byte[] decode( byte[] source, int off, int len, int options ) - { - byte[] DECODABET = getDecodabet( options ); - - int len34 = len * 3 / 4; - byte[] outBuff = new byte[ len34 ]; // Upper limit on size of output - int outBuffPosn = 0; - - byte[] b4 = new byte[4]; - int b4Posn = 0; - int i; - byte sbiCrop; - byte sbiDecode; - for( i = off; i < off+len; i++ ) - { - sbiCrop = (byte)(source[i] & 0x7f); // Only the low seven bits - sbiDecode = DECODABET[ sbiCrop ]; - - if( sbiDecode >= WHITE_SPACE_ENC ) // White space, Equals sign or better - { - if( sbiDecode >= EQUALS_SIGN_ENC ) - { - b4[ b4Posn++ ] = sbiCrop; - if( b4Posn > 3 ) - { - outBuffPosn += decode4to3( b4, 0, outBuff, outBuffPosn, options ); - b4Posn = 0; - - // If that was the equals sign, break out of 'for' loop - if( sbiCrop == EQUALS_SIGN ) - break; - } // end if: quartet built - - } // end if: equals sign or better - - } // end if: white space, equals sign or better - else - { - System.err.println( "Bad Base64 input character at " + i + ": " + source[i] + "(decimal)" ); - return null; - } // end else: - } // each input character - - byte[] out = new byte[ outBuffPosn ]; - System.arraycopy( outBuff, 0, out, 0, outBuffPosn ); - return out; - } // end decode - - - - - /** - * Decodes data from Base64 notation, automatically - * detecting gzip-compressed data and decompressing it. - * - * @param s the string to decode - * @return the decoded data - * @since 1.4 - */ - public static byte[] decode( String s ) - { - return decode( s, NO_OPTIONS ); - } - - - /** - * Decodes data from Base64 notation, automatically - * detecting gzip-compressed data and decompressing it. - * - * @param s the string to decode - * @param options encode options such as URL_SAFE - * @return the decoded data - * @since 1.4 - */ - public static byte[] decode( String s, int options ) - { - byte[] bytes; - try - { - bytes = s.getBytes( PREFERRED_ENCODING ); - } // end try - catch( java.io.UnsupportedEncodingException uee ) - { - bytes = s.getBytes(); - } // end catch - // - - // Decode - bytes = decode( bytes, 0, bytes.length, options ); - - - // Check to see if it's gzip-compressed - // GZIP Magic Two-Byte Number: 0x8b1f (35615) - if( bytes != null && bytes.length >= 4 ) - { - - int head = ((int)bytes[0] & 0xff) | ((bytes[1] << 8) & 0xff00); - if( java.util.zip.GZIPInputStream.GZIP_MAGIC == head ) - { - java.io.ByteArrayInputStream bais = null; - java.util.zip.GZIPInputStream gzis = null; - java.io.ByteArrayOutputStream baos = null; - byte[] buffer = new byte[2048]; - int length; - - try - { - baos = new java.io.ByteArrayOutputStream(); - bais = new java.io.ByteArrayInputStream( bytes ); - gzis = new java.util.zip.GZIPInputStream( bais ); - - while( ( length = gzis.read( buffer ) ) >= 0 ) - { - baos.write(buffer,0,length); - } // end while: reading input - - // No error? Get new bytes. 
- bytes = baos.toByteArray(); - - } // end try - catch( java.io.IOException e ) - { - // Just return originally-decoded bytes - } // end catch - finally - { - try{ baos.close(); } catch( Exception e ){ /* empty */ } - try{ gzis.close(); } catch( Exception e ){ /* empty */ } - try{ bais.close(); } catch( Exception e ){ /* empty */ } - } // end finally - - } // end if: gzipped - } // end if: bytes.length >= 2 - - return bytes; - } // end decode - - - - - /** - * Attempts to decode Base64 data and deserialize a Java - * Object within. Returns null if there was an error. - * - * @param encodedObject The Base64 data to decode - * @return The decoded and deserialized object - * @since 1.5 - */ - public static Object decodeToObject( String encodedObject ) - { - // Decode and gunzip if necessary - byte[] objBytes = decode( encodedObject ); - - java.io.ByteArrayInputStream bais = null; - java.io.ObjectInputStream ois = null; - Object obj = null; - - try - { - bais = new java.io.ByteArrayInputStream( objBytes ); - ois = new java.io.ObjectInputStream( bais ); - - obj = ois.readObject(); - } // end try - catch( java.io.IOException e ) - { - e.printStackTrace(); - obj = null; - } // end catch - catch( java.lang.ClassNotFoundException e ) - { - e.printStackTrace(); - obj = null; - } // end catch - finally - { - if (bais!=null) - try{ bais.close(); } catch( Exception e ){ /* empty */ } - if (ois!=null) - try{ ois.close(); } catch( Exception e ){ /* empty */ } - } // end finally - - return obj; - } // end decodeObject - - - - /** - * Convenience method for encoding data to a file. - * - * @param dataToEncode byte array of data to encode in base64 form - * @param filename Filename for saving encoded data - * @return true if successful, false otherwise - * - * @since 2.1 - */ - public static boolean encodeToFile( byte[] dataToEncode, String filename ) - { - boolean success = false; - Base64.OutputStream bos = null; - try - { - bos = new Base64.OutputStream( - new java.io.FileOutputStream( filename ), Base64.ENCODE ); - bos.write( dataToEncode ); - success = true; - } // end try - catch( java.io.IOException e ) - { - - success = false; - } // end catch: IOException - finally - { - if (bos!=null) - try{ bos.close(); } catch( Exception e ){ /* empty */ } - } // end finally - - return success; - } // end encodeToFile - - - /** - * Convenience method for decoding data to a file. - * - * @param dataToDecode Base64-encoded data as a string - * @param filename Filename for saving decoded data - * @return true if successful, false otherwise - * - * @since 2.1 - */ - public static boolean decodeToFile( String dataToDecode, String filename ) - { - boolean success = false; - Base64.OutputStream bos = null; - try - { - bos = new Base64.OutputStream( - new java.io.FileOutputStream( filename ), Base64.DECODE ); - bos.write( dataToDecode.getBytes( PREFERRED_ENCODING ) ); - success = true; - } // end try - catch( java.io.IOException e ) - { - success = false; - } // end catch: IOException - finally - { - if (bos!=null) - try{ bos.close(); } catch( Exception e ){ /* empty */ } - } // end finally - - return success; - } // end decodeToFile - - - - - /** - * Convenience method for reading a base64-encoded - * file and decoding it. 
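Editor's note: the gzip auto-detection in decode(String, int) above hinges on gzip's two-byte magic number read little-endian. A minimal sketch of that check in isolation:

    import java.util.zip.GZIPInputStream;

    public class GzipMagicDemo {
        public static void main(String[] args) {
            // Every gzip stream starts with the bytes 0x1f 0x8b,
            // i.e. the little-endian word 0x8b1f.
            byte[] decoded = { 0x1f, (byte) 0x8b, 0x08, 0x00 };

            int head = (decoded[0] & 0xff) | ((decoded[1] << 8) & 0xff00);
            System.out.println(head == GZIPInputStream.GZIP_MAGIC);   // true
        }
    }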
- * - * @param filename Filename for reading encoded data - * @return decoded byte array or null if unsuccessful - * - * @since 2.1 - */ - public static byte[] decodeFromFile( String filename ) - { - byte[] decodedData = null; - Base64.InputStream bis = null; - try - { - // Set up some useful variables - java.io.File file = new java.io.File( filename ); - byte[] buffer; - int length = 0; - int numBytes; - - // Check for size of file - if( file.length() > Integer.MAX_VALUE ) - { - System.err.println( "File is too big for this convenience method (" + file.length() + " bytes)." ); - return null; - } // end if: file too big for int index - buffer = new byte[ (int)file.length() ]; - - // Open a stream - bis = new Base64.InputStream( - new java.io.BufferedInputStream( - new java.io.FileInputStream( file ) ), Base64.DECODE ); - - // Read until done - while( ( numBytes = bis.read( buffer, length, 4096 ) ) >= 0 ) - length += numBytes; - - // Save in a variable to return - decodedData = new byte[ length ]; - System.arraycopy( buffer, 0, decodedData, 0, length ); - - } // end try - catch( java.io.IOException e ) - { - System.err.println( "Error decoding from file " + filename ); - } // end catch: IOException - finally - { - if (bis!=null) - try{ bis.close(); } catch( Exception e) { /* empty */ } - } // end finally - - return decodedData; - } // end decodeFromFile - - - - /** - * Convenience method for reading a binary file - * and base64-encoding it. - * - * @param filename Filename for reading binary data - * @return base64-encoded string or null if unsuccessful - * - * @since 2.1 - */ - public static String encodeFromFile( String filename ) - { - String encodedData = null; - Base64.InputStream bis = null; - try - { - // Set up some useful variables - java.io.File file = new java.io.File( filename ); - byte[] buffer = new byte[ Math.max((int)(file.length() * 1.4),40) ]; // Need max() for math on small files (v2.2.1) - int length = 0; - int numBytes; - - // Open a stream - bis = new Base64.InputStream( - new java.io.BufferedInputStream( - new java.io.FileInputStream( file ) ), Base64.ENCODE ); - - // Read until done - while( ( numBytes = bis.read( buffer, length, 4096 ) ) >= 0 ) - length += numBytes; - - // Save in a variable to return - encodedData = new String( buffer, 0, length, Base64.PREFERRED_ENCODING ); - - } // end try - catch( java.io.IOException e ) - { - System.err.println( "Error encoding from file " + filename ); - } // end catch: IOException - finally - { - if (bis!=null) - try{ bis.close(); } catch( Exception e) { /* empty */ } - } // end finally - - return encodedData; - } // end encodeFromFile - - /** - * Reads infile and encodes it to outfile. - * - * @param infile Input file - * @param outfile Output file - * @since 2.2 - */ - public static void encodeFileToFile( String infile, String outfile ) - { - String encoded = Base64.encodeFromFile( infile ); - java.io.OutputStream out = null; - try{ - out = new java.io.BufferedOutputStream( - new java.io.FileOutputStream( outfile ) ); - out.write( encoded.getBytes("US-ASCII") ); // Strict, 7-bit output. - } // end try - catch( java.io.IOException ex ) { - ex.printStackTrace(); - } // end catch - finally { - if (out!=null) - try { out.close(); } - catch( Exception ex ){ /* empty */ } - } // end finally - } // end encodeFileToFile - - - /** - * Reads infile and decodes it to outfile. 
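Editor's note: the file helpers pair up naturally. A small round-trip sketch using the convenience methods above, assuming the removed class is still on the classpath (the /tmp path is purely hypothetical):

    public class FileRoundTripDemo {
        public static void main(String[] args) {
            byte[] payload = "some binary payload".getBytes();

            // Encode to a file, then read it back and decode it.
            if (Base64.encodeToFile(payload, "/tmp/payload.b64")) {
                byte[] restored = Base64.decodeFromFile("/tmp/payload.b64");
                System.out.println(new String(restored));   // some binary payload
            }
        }
    }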
- * - * @param infile Input file - * @param outfile Output file - * @since 2.2 - */ - public static void decodeFileToFile( String infile, String outfile ) - { - byte[] decoded = Base64.decodeFromFile( infile ); - java.io.OutputStream out = null; - try{ - out = new java.io.BufferedOutputStream( - new java.io.FileOutputStream( outfile ) ); - out.write( decoded ); - } // end try - catch( java.io.IOException ex ) { - ex.printStackTrace(); - } // end catch - finally { - if (out!=null) - try { out.close(); } - catch( Exception ex ){ /* empty */ } - } // end finally - } // end decodeFileToFile - - - /* ******** I N N E R C L A S S I N P U T S T R E A M ******** */ - - - - /** - * A {@link Base64.InputStream} will read data from another - * java.io.InputStream, given in the constructor, - * and encode/decode to/from Base64 notation on the fly. - * - * @see Base64 - * @since 1.3 - */ - public static class InputStream extends java.io.FilterInputStream - { - private boolean encode; // Encoding or decoding - private int position; // Current position in the buffer - private byte[] buffer; // Small buffer holding converted data - private int bufferLength; // Length of buffer (3 or 4) - private int numSigBytes; // Number of meaningful bytes in the buffer - private int lineLength; - private boolean breakLines; // Break lines at less than 80 characters - private int options; // Record options used to create the stream. - @SuppressWarnings("unused") - private byte[] alphabet; // Local copies to avoid extra method calls - private byte[] decodabet; // Local copies to avoid extra method calls - - - /** - * Constructs a {@link Base64.InputStream} in DECODE mode. - * - * @param in the java.io.InputStream from which to read data. - * @since 1.3 - */ - public InputStream( java.io.InputStream in ) - { - this( in, DECODE ); - } // end constructor - - - /** - * Constructs a {@link Base64.InputStream} in - * either ENCODE or DECODE mode. - *

- * Valid options:
-         *   ENCODE or DECODE: Encode or Decode as data is read.
-         *   DONT_BREAK_LINES: don't break lines at 76 characters
-         *     (only meaningful when encoding)
-         *     Note: Technically, this makes your encoding non-compliant.
- *
- * Example: new Base64.InputStream( in, Base64.DECODE ) - * - * - * @param in the java.io.InputStream from which to read data. - * @param options Specified options - * @see Base64#ENCODE - * @see Base64#DECODE - * @see Base64#DONT_BREAK_LINES - * @since 2.0 - */ - public InputStream( java.io.InputStream in, int options ) - { - super( in ); - this.breakLines = (options & DONT_BREAK_LINES) != DONT_BREAK_LINES; - this.encode = (options & ENCODE) == ENCODE; - this.bufferLength = encode ? 4 : 3; - this.buffer = new byte[ bufferLength ]; - this.position = -1; - this.lineLength = 0; - this.options = options; // Record for later, mostly to determine which alphabet to use - this.alphabet = getAlphabet(options); - this.decodabet = getDecodabet(options); - } // end constructor - - /** - * Reads enough of the input stream to convert - * to/from Base64 and returns the next byte. - * - * @return next byte - * @since 1.3 - */ - public int read() throws java.io.IOException - { - // Do we need to get data? - if( position < 0 ) - { - if( encode ) - { - byte[] b3 = new byte[3]; - int numBinaryBytes = 0; - for( int i = 0; i < 3; i++ ) - { - try - { - int b = in.read(); - - // If end of stream, b is -1. - if( b >= 0 ) - { - b3[i] = (byte)b; - numBinaryBytes++; - } // end if: not end of stream - - } // end try: read - catch( java.io.IOException e ) - { - // Only a problem if we got no data at all. - if( i == 0 ) - throw e; - - } // end catch - } // end for: each needed input byte - - if( numBinaryBytes > 0 ) - { - encode3to4( b3, 0, numBinaryBytes, buffer, 0, options ); - position = 0; - numSigBytes = 4; - } // end if: got data - else - { - return -1; - } // end else - } // end if: encoding - - // Else decoding - else - { - byte[] b4 = new byte[4]; - int i; - for( i = 0; i < 4; i++ ) - { - // Read four "meaningful" bytes: - int b; - do{ b = in.read(); } - while( b >= 0 && decodabet[ b & 0x7f ] <= WHITE_SPACE_ENC ); - - if( b < 0 ) - break; // Reads a -1 if end of stream - - b4[i] = (byte)b; - } // end for: each needed input byte - - if( i == 4 ) - { - numSigBytes = decode4to3( b4, 0, buffer, 0, options ); - position = 0; - } // end if: got four characters - else if( i == 0 ){ - return -1; - } // end else if: also padded correctly - else - { - // Must have broken out from above. - throw new java.io.IOException( "Improperly padded Base64 input." ); - } // end - - } // end else: decode - } // end else: get data - - // Got data? - if( position >= 0 ) - { - // End of relevant data? - if( /*!encode &&*/ position >= numSigBytes ) - return -1; - - if( encode && breakLines && lineLength >= MAX_LINE_LENGTH ) - { - lineLength = 0; - return '\n'; - } // end if - else - { - lineLength++; // This isn't important when decoding - // but throwing an extra "if" seems - // just as wasteful. - - int b = buffer[ position++ ]; - - if( position >= bufferLength ) - position = -1; - - return b & 0xFF; // This is how you "cast" a byte that's - // intended to be unsigned. - } // end else - } // end if: position >= 0 - - // Else error - else - { - // When JDK1.4 is more accepted, use an assertion here. - throw new java.io.IOException( "Error in Base64 code reading stream." ); - } // end else - } // end read - - - /** - * Calls {@link #read()} repeatedly until the end of stream - * is reached or len bytes are read. - * Returns number of bytes read into array or -1 if - * end of stream is encountered. 
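Editor's note: a minimal sketch of the DECODE mode just described, wrapping an in-memory stream so the read() loop above yields decoded bytes (same classpath assumption as earlier sketches):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;

    public class DecodeStreamDemo {
        public static void main(String[] args) throws IOException {
            byte[] encoded = "Q2F0".getBytes("US-ASCII");

            // Bytes are decoded from Base64 as they are read from the wrapped stream.
            Base64.InputStream in =
                new Base64.InputStream(new ByteArrayInputStream(encoded), Base64.DECODE);

            ByteArrayOutputStream out = new ByteArrayOutputStream();
            int b;
            while ((b = in.read()) >= 0) {
                out.write(b);
            }
            in.close();
            System.out.println(out.toString("US-ASCII"));   // "Cat"
        }
    }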
- * - * @param dest array to hold values - * @param off offset for array - * @param len max number of bytes to read into array - * @return bytes read into array or -1 if end of stream is encountered. - * @since 1.3 - */ - public int read( byte[] dest, int off, int len ) throws java.io.IOException - { - int i; - int b; - for( i = 0; i < len; i++ ) - { - b = read(); - - //if( b < 0 && i == 0 ) - // return -1; - - if( b >= 0 ) - dest[off + i] = (byte)b; - else if( i == 0 ) - return -1; - else - break; // Out of 'for' loop - } // end for: each byte read - return i; - } // end read - - } // end inner class InputStream - - - - - - - /* ******** I N N E R C L A S S O U T P U T S T R E A M ******** */ - - - - /** - * A {@link Base64.OutputStream} will write data to another - * java.io.OutputStream, given in the constructor, - * and encode/decode to/from Base64 notation on the fly. - * - * @see Base64 - * @since 1.3 - */ - public static class OutputStream extends java.io.FilterOutputStream - { - private boolean encode; - private int position; - private byte[] buffer; - private int bufferLength; - private int lineLength; - private boolean breakLines; - private byte[] b4; // Scratch used in a few places - private boolean suspendEncoding; - private int options; // Record for later - @SuppressWarnings("unused") - private byte[] alphabet; // Local copies to avoid extra method calls - private byte[] decodabet; // Local copies to avoid extra method calls - - /** - * Constructs a {@link Base64.OutputStream} in ENCODE mode. - * - * @param out the java.io.OutputStream to which data will be written. - * @since 1.3 - */ - public OutputStream( java.io.OutputStream out ) - { - this( out, ENCODE ); - } // end constructor - - - /** - * Constructs a {@link Base64.OutputStream} in - * either ENCODE or DECODE mode. - *

- * Valid options:
-         *   ENCODE or DECODE: Encode or Decode as data is read.
-         *   DONT_BREAK_LINES: don't break lines at 76 characters
-         *     (only meaningful when encoding)
-         *     Note: Technically, this makes your encoding non-compliant.
- *
- * Example: new Base64.OutputStream( out, Base64.ENCODE ) - * - * @param out the java.io.OutputStream to which data will be written. - * @param options Specified options. - * @see Base64#ENCODE - * @see Base64#DECODE - * @see Base64#DONT_BREAK_LINES - * @since 1.3 - */ - public OutputStream( java.io.OutputStream out, int options ) - { - super( out ); - this.breakLines = (options & DONT_BREAK_LINES) != DONT_BREAK_LINES; - this.encode = (options & ENCODE) == ENCODE; - this.bufferLength = encode ? 3 : 4; - this.buffer = new byte[ bufferLength ]; - this.position = 0; - this.lineLength = 0; - this.suspendEncoding = false; - this.b4 = new byte[4]; - this.options = options; - this.alphabet = getAlphabet(options); - this.decodabet = getDecodabet(options); - } // end constructor - - - /** - * Writes the byte to the output stream after - * converting to/from Base64 notation. - * When encoding, bytes are buffered three - * at a time before the output stream actually - * gets a write() call. - * When decoding, bytes are buffered four - * at a time. - * - * @param theByte the byte to write - * @since 1.3 - */ - public void write(int theByte) throws java.io.IOException - { - // Encoding suspended? - if( suspendEncoding ) - { - super.out.write( theByte ); - return; - } // end if: supsended - - // Encode? - if( encode ) - { - buffer[ position++ ] = (byte)theByte; - if( position >= bufferLength ) // Enough to encode. - { - out.write( encode3to4( b4, buffer, bufferLength, options ) ); - - lineLength += 4; - if( breakLines && lineLength >= MAX_LINE_LENGTH ) - { - out.write( NEW_LINE ); - lineLength = 0; - } // end if: end of line - - position = 0; - } // end if: enough to output - } // end if: encoding - - // Else, Decoding - else - { - // Meaningful Base64 character? - if( decodabet[ theByte & 0x7f ] > WHITE_SPACE_ENC ) - { - buffer[ position++ ] = (byte)theByte; - if( position >= bufferLength ) // Enough to output. - { - int len = Base64.decode4to3( buffer, 0, b4, 0, options ); - out.write( b4, 0, len ); - //out.write( Base64.decode4to3( buffer ) ); - position = 0; - } // end if: enough to output - } // end if: meaningful base64 character - else if( decodabet[ theByte & 0x7f ] != WHITE_SPACE_ENC ) - { - throw new java.io.IOException( "Invalid character in Base64 data." ); - } // end else: not white space either - } // end else: decoding - } // end write - - - - /** - * Calls {@link #write(int)} repeatedly until len - * bytes are written. - * - * @param theBytes array from which to read bytes - * @param off offset for array - * @param len max number of bytes to read into array - * @since 1.3 - */ - public void write( byte[] theBytes, int off, int len ) throws java.io.IOException - { - // Encoding suspended? - if( suspendEncoding ) - { - super.out.write( theBytes, off, len ); - return; - } // end if: supsended - - for( int i = 0; i < len; i++ ) - { - write( theBytes[ off + i ] ); - } // end for: each byte written - - } // end write - - - - /** - * Method added by PHIL. [Thanks, PHIL. -Rob] - * This pads the buffer without closing the stream. - */ - public void flushBase64() throws java.io.IOException - { - if( position > 0 ) - { - if( encode ) - { - out.write( encode3to4( b4, buffer, position, options ) ); - position = 0; - } // end if: encoding - else - { - throw new java.io.IOException( "Base64 input not properly padded." ); - } // end else: decoding - } // end if: buffer partially full - - } // end flush - - - /** - * Flushes and closes (I think, in the superclass) the stream. 
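Editor's note: and the ENCODE direction: bytes written to the wrapper come out Base64-encoded, with close() running flushBase64() first so any partial group is padded. A sketch under the same classpath assumption:

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;

    public class EncodeStreamDemo {
        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream sink = new ByteArrayOutputStream();

            // Bytes written here are buffered three at a time and encoded on the fly.
            Base64.OutputStream b64out = new Base64.OutputStream(sink, Base64.ENCODE);
            b64out.write("Cat".getBytes("US-ASCII"));
            b64out.close();   // close() runs flushBase64(), then closes the wrapped stream

            System.out.println(sink.toString("US-ASCII"));   // "Q2F0"
        }
    }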
- * - * @since 1.3 - */ - public void close() throws java.io.IOException - { - // 1. Ensure that pending characters are written - flushBase64(); - - // 2. Actually close the stream - // Base class both flushes and closes. - super.close(); - - buffer = null; - out = null; - } // end close - - - - /** - * Suspends encoding of the stream. - * May be helpful if you need to embed a piece of - * base640-encoded data in a stream. - * - * @since 1.5.1 - */ - public void suspendEncoding() throws java.io.IOException - { - flushBase64(); - this.suspendEncoding = true; - } // end suspendEncoding - - - /** - * Resumes encoding of the stream. - * May be helpful if you need to embed a piece of - * base640-encoded data in a stream. - * - * @since 1.5.1 - */ - public void resumeEncoding() - { - this.suspendEncoding = false; - } // end resumeEncoding - - - - } // end inner class OutputStream - - -} // end class Base64 diff --git a/utils/test/com/cloud/utils/component/MockComponentLocator.java b/utils/test/com/cloud/utils/component/MockComponentLocator.java index f993ff62e31..f7adf0640c3 100755 --- a/utils/test/com/cloud/utils/component/MockComponentLocator.java +++ b/utils/test/com/cloud/utils/component/MockComponentLocator.java @@ -67,13 +67,13 @@ public class MockComponentLocator extends ComponentLocator { _managerMap = new LinkedHashMap>(); _checkerMap = new LinkedHashMap>(); _adapterMap = new HashMap>(); - _pluggableServicesMap = new LinkedHashMap>(); + _pluginsMap = new HashMap>(); _factories = new HashMap, Class>(); _daoMap.putAll(_library.getDaos()); _managerMap.putAll(_library.getManagers()); result.second().putAll(_library.getAdapters()); _factories.putAll(_library.getFactories()); - _pluggableServicesMap.putAll(_library.getPluggableServices()); + _pluginsMap.putAll(_library.getPluggableServices()); return result; }