Merge branch 'master' into events-framework

This commit is contained in:
Murali Reddy 2012-11-06 11:46:02 +05:30 committed by Rohit Yadav
commit 9bb34be7a9
82 changed files with 2968 additions and 1094 deletions

2
.gitignore vendored
View File

@ -33,6 +33,7 @@ dist/
cloud-*.tar.bz2
*.log
*.pyc
*.egginfo/
build.number
api.log.*.gz
cloud.log.*.*
@ -45,6 +46,7 @@ deps/awsapi-lib/
git-remote-https.exe.stackdump
*.swp
tools/devcloud/devcloudbox/.vagrant
tools/cli/cloudmonkey/marvin/
*.jar
*.war
*.mar

View File

@ -27,7 +27,6 @@ import java.util.List;
import java.util.Map;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.SynchronousQueue;
@ -48,6 +47,7 @@ import com.cloud.agent.api.MaintainAnswer;
import com.cloud.agent.api.MaintainCommand;
import com.cloud.agent.api.ModifySshKeysCommand;
import com.cloud.agent.api.PingCommand;
import com.cloud.agent.api.ReadyCommand;
import com.cloud.agent.api.ShutdownCommand;
import com.cloud.agent.api.StartupAnswer;
import com.cloud.agent.api.StartupCommand;
@ -491,6 +491,10 @@ public class Agent implements HandlerFactory, IAgentControl {
cancelTasks();
_reconnectAllowed = false;
answer = new Answer(cmd, true, null);
} else if (cmd instanceof ReadyCommand && ((ReadyCommand)cmd).getDetails() != null) {
s_logger.debug("Not ready to connect to mgt server: " + ((ReadyCommand)cmd).getDetails());
System.exit(1);
return;
} else if (cmd instanceof MaintainCommand) {
s_logger.debug("Received maintainCommand" );
cancelTasks();
@ -513,6 +517,9 @@ public class Agent implements HandlerFactory, IAgentControl {
}
} else {
if (cmd instanceof ReadyCommand) {
processReadyCommand((ReadyCommand)cmd);
}
_inProgress.incrementAndGet();
try {
answer = _resource.executeRequest(cmd);
@ -576,6 +583,19 @@ public class Agent implements HandlerFactory, IAgentControl {
setLastPingResponseTime();
}
}
public void processReadyCommand(Command cmd) {
final ReadyCommand ready = (ReadyCommand) cmd;
s_logger.info("Proccess agent ready command, agent id = " + ready.getHostId());
if (ready.getHostId() != null) {
setId(ready.getHostId());
}
s_logger.info("Ready command is processed: agent id = " + getId());
}
public void processOtherTask(Task task) {
final Object obj = task.get();
@ -601,6 +621,7 @@ public class Agent implements HandlerFactory, IAgentControl {
} catch (final ClosedChannelException e) {
s_logger.warn("Unable to send request: " + request.toString());
}
} else if (obj instanceof Request) {
final Request req = (Request) obj;
final Command command = req.getCommand();

View File

@ -23,12 +23,18 @@ public class ReadyCommand extends Command {
}
private Long dcId;
private Long hostId;
public ReadyCommand(Long dcId) {
super();
this.dcId = dcId;
}
public ReadyCommand(Long dcId, Long hostId) {
this(dcId);
this.hostId = hostId;
}
public void setDetails(String details) {
_details = details;
}
@ -46,4 +52,7 @@ public class ReadyCommand extends Command {
return true;
}
public Long getHostId() {
return hostId;
}
}
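
The new two-argument constructor lets the management server hand the agent its host id as part of the ready handshake; Agent.processReadyCommand() above then adopts that id via setId(), while a non-null details field is treated as "not ready" and makes the agent exit. A minimal sketch of both ends, assuming hypothetical zone/host objects and a link.send() transport (the real wiring goes through the agent manager):

    // Management-server side (hypothetical send path):
    ReadyCommand ready = new ReadyCommand(zone.getId(), host.getId());
    // ready.setDetails("zone is disabled");   // a non-null details field makes the agent bail out
    link.send(ready);

    // Agent side, as implemented above:
    if (cmd instanceof ReadyCommand) {
        processReadyCommand((ReadyCommand) cmd);   // picks up getHostId() and calls setId()
    }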

View File

@ -16,7 +16,9 @@
// under the License.
package com.cloud.async;
public interface SyncQueueItem {
public final String AsyncJobContentType = "AsyncJob";
String getContentType();

View File

@ -30,6 +30,7 @@ label.broadcast.uri=Broadcast URI
#modified labels (begin) *****************************************************************************************
label.site.to.site.VPN=Site-to-site VPN
message.zoneWizard.enable.local.storage=WARNING: If you enable local storage for this zone, you must do the following, depending on where you would like your system VMs to launch:<br/><br/>1. If system VMs need to be launched in primary storage, primary storage needs to be added to the zone after creation. You must also start the zone in a disabled state.<br/><br/>2. If system VMs need to be launched in local storage, system.vm.use.local.storage needs to be set to true before you enable the zone.<br/><br/><br/>Would you like to continue?
#modified labels (end) *******************************************************************************************
@ -61,7 +62,6 @@ label.CIDR.of.destination.network=CIDR of destination network
label.add.route=Add route
label.add.static.route=Add static route
label.remove.static.route=Remove static route
label.site.to.site.VPN=site-to-site VPN
label.add.VPN.gateway=Add VPN Gateway
message.add.VPN.gateway=Please confirm that you want to add a VPN Gateway
label.VPN.gateway=VPN Gateway

View File

@ -40,6 +40,11 @@
<artifactId>cloud-plugin-user-authenticator-plaintext</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-user-authenticator-sha256salted</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-network-nvp</artifactId>
@ -159,7 +164,7 @@
</fileset>
</copy>
<copy
todir="${basedir}/target/generated-webapp/WEB-INF/lib/scripts">
todir="${basedir}/target/generated-webapp/WEB-INF/classes/scripts">
<fileset dir="${basedir}/../scripts" />
</copy>
<copy
@ -186,7 +191,49 @@
</filterreader>
</filterchain>
</copy>
<copy overwrite="true" file="../console-proxy/dist/systemvm.iso" todir="${basedir}/target/generated-webapp/WEB-INF/lib/vms"/>
<copy overwrite="true" todir="${basedir}/target/generated-webapp/WEB-INF/classes">
<fileset dir="${basedir}/tomcatconf">
<include name="*.in" />
</fileset>
<globmapper from="*.in" to="*" />
<filterchain>
<filterreader classname="org.apache.tools.ant.filters.ReplaceTokens">
<param type="propertiesfile" value="${basedir}/../build/replace.properties" />
</filterreader>
</filterchain>
</copy>
<copy overwrite="true" todir="${basedir}/target/utilities/bin">
<fileset dir="${basedir}/../setup/bindir">
<include name="*.in" />
</fileset>
<globmapper from="*.in" to="*" />
<filterchain>
<filterreader classname="org.apache.tools.ant.filters.ReplaceTokens">
<param type="propertiesfile" value="${basedir}/../build/replace.properties" />
</filterreader>
</filterchain>
</copy>
<copy overwrite="true" todir="${basedir}/target/utilities/bin">
<fileset dir="${basedir}/bindir">
<include name="*.in" />
</fileset>
<globmapper from="*.in" to="*" />
<filterchain>
<filterreader classname="org.apache.tools.ant.filters.ReplaceTokens">
<param type="propertiesfile" value="${basedir}/../build/replace.properties" />
</filterreader>
</filterchain>
</copy>
<copy overwrite="true" todir="${basedir}/target/utilities/scripts/db">
<fileset dir="${basedir}/../setup/db">
<include name="**/*" />
</fileset>
<filterchain>
<filterreader classname="org.apache.tools.ant.filters.ReplaceTokens">
<param type="propertiesfile" value="${basedir}/../build/replace.properties" />
</filterreader>
</filterchain>
</copy>
</target>
</configuration>
</execution>
@ -208,6 +255,39 @@
</execution>
</executions>
</plugin>
<!-- these are the jasypt libs required by some of the python scripts -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<version>2.5.1</version>
<executions>
<execution>
<id>copy</id>
<phase>package</phase>
<goals>
<goal>copy</goal>
</goals>
<configuration>
<artifactItems>
<artifactItem>
<groupId>org.jasypt</groupId>
<artifactId>jasypt</artifactId>
<version>1.9.0</version>
<overWrite>false</overWrite>
<outputDirectory>${project.build.directory}/pythonlibs</outputDirectory>
</artifactItem>
<artifactItem>
<groupId>org.jasypt</groupId>
<artifactId>jasypt</artifactId>
<version>1.8</version>
<overWrite>false</overWrite>
<outputDirectory>${project.build.directory}/pythonlibs</outputDirectory>
</artifactItem>
</artifactItems>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
<pluginManagement>
<plugins>

View File

@ -109,6 +109,7 @@ under the License.
<adapter name="Basic" class="com.cloud.network.ExteralIpAddressAllocator"/>
</adapters>
<adapters key="com.cloud.server.auth.UserAuthenticator">
<!-- <adapter name="SHA256SALT" class="com.cloud.server.auth.SHA256SaltedUserAuthenticator"/> -->
<adapter name="MD5" class="com.cloud.server.auth.MD5UserAuthenticator"/>
<adapter name="LDAP" class="com.cloud.server.auth.LDAPUserAuthenticator"/>
</adapters>

View File

@ -264,8 +264,35 @@ public class VirtualRoutingResource implements Manager {
return new SetPortForwardingRulesAnswer(cmd, results, endResult);
}
protected Answer SetVPCStaticNatRules(SetStaticNatRulesCommand cmd) {
String routerIp = cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP);
String[] results = new String[cmd.getRules().length];
int i = 0;
boolean endResult = true;
for (StaticNatRuleTO rule : cmd.getRules()) {
String args = rule.revoked() ? " -D" : " -A";
args += " -l " + rule.getSrcIp();
args += " -r " + rule.getDstIp();
String result = routerProxy("vpc_staticnat.sh", routerIp, args);
if(result == null) {
results[i++] = null;
} else {
results[i++] = "Failed";
endResult = false;
}
}
return new SetStaticNatRulesAnswer(cmd, results, endResult);
}
private Answer execute(SetStaticNatRulesCommand cmd) {
if ( cmd.getVpcId() != null ) {
return SetVPCStaticNatRules(cmd);
}
String routerIp = cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP);
String[] results = new String[cmd.getRules().length];
int i = 0;

2
debian/control vendored
View File

@ -90,7 +90,7 @@ Description: CloudStack agent libraries
Package: cloud-agent
Architecture: any
Depends: openjdk-6-jre, cloud-utils (= ${source:Version}), cloud-core (= ${source:Version}), cloud-agent-deps (= ${source:Version}), cloud-python (= ${source:Version}), cloud-agent-libs (= ${source:Version}), cloud-scripts (= ${source:Version}), libvirt0, sysvinit-utils, chkconfig, qemu-kvm, libvirt-bin, uuid-runtime, rsync, grep, iproute, ebtables, vlan, liblog4j1.2-java (>= 1.2.16), libjna-java, wget, jsvc, lsb-base (>= 3.2)
Depends: openjdk-6-jre, cloud-utils (= ${source:Version}), cloud-core (= ${source:Version}), cloud-agent-deps (= ${source:Version}), cloud-python (= ${source:Version}), cloud-agent-libs (= ${source:Version}), cloud-scripts (= ${source:Version}), cloud-system-iso (= ${source:Version}), libvirt0, sysvinit-utils, chkconfig, qemu-kvm, libvirt-bin, uuid-runtime, rsync, grep, iproute, ebtables, vlan, liblog4j1.2-java (>= 1.2.16), libjna-java, wget, jsvc, lsb-base (>= 3.2)
Description: CloudStack agent
The CloudStack agent is in charge of managing shared computing resources in
a CloudStack powered cloud. Install this package if this computer

View File

@ -0,0 +1,52 @@
-- Licensed to the Apache Software Foundation (ASF) under one
-- or more contributor license agreements. See the NOTICE file
-- distributed with this work for additional information
-- regarding copyright ownership. The ASF licenses this file
-- to you under the Apache License, Version 2.0 (the
-- "License"); you may not use this file except in compliance
-- with the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing,
-- software distributed under the License is distributed on an
-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-- KIND, either express or implied. See the License for the
-- specific language governing permissions and limitations
-- under the License.
-- Add a default ROOT domain
INSERT INTO `cloud`.`domain` (id, name, parent, path, owner) VALUES
(1, 'ROOT', NULL, '/', 2);
-- Add system and admin accounts
INSERT INTO `cloud`.`account` (id, account_name, type, domain_id, state) VALUES
(1, 'system', 1, 1, 'enabled');
INSERT INTO `cloud`.`account` (id, account_name, type, domain_id, state) VALUES
(2, 'admin', 1, 1, 'enabled');
-- Add system user
INSERT INTO `cloud`.`user` (id, username, password, account_id, firstname,
lastname, email, state, created) VALUES (1, 'system', RAND(),
'1', 'system', 'cloud', NULL, 'enabled', NOW());
-- Add system user with encrypted password=password
INSERT INTO `cloud`.`user` (id, username, password, account_id, firstname,
lastname, email, state, created) VALUES (2, 'admin', '5f4dcc3b5aa765d61d8327deb882cf99',
'2', 'Admin', 'User', 'admin@mailprovider.com', 'enabled', NOW());
-- Add configurations
INSERT INTO `cloud`.`configuration` (category, instance, component, name, value)
VALUES ('Hidden', 'DEFAULT', 'management-server', 'init', 'false');
INSERT INTO `cloud`.`configuration` (category, instance, component, name, value)
VALUES ('Advanced', 'DEFAULT', 'management-server',
'integration.api.port', '8096');
-- Add developer configuration entry; allows management server to be run as a user other than "cloud"
INSERT INTO `cloud`.`configuration` (category, instance, component, name, value)
VALUES ('Advanced', 'DEFAULT', 'management-server',
'developer', 'true');
commit;
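
The seeded admin row stores the MD5 of the default password "password" (5f4dcc3b5aa765d61d8327deb882cf99), which is the same zero-padded hex digest that MD5UserAuthenticator.encode(), added later in this commit, produces. A quick standalone sketch to confirm the value (the class name is just for illustration):

    import java.math.BigInteger;
    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;

    public class Md5Check {
        public static void main(String[] args) throws NoSuchAlgorithmException {
            MessageDigest md5 = MessageDigest.getInstance("MD5");
            // Same encoding as MD5UserAuthenticator.encode(): hex of the digest, left-padded to 32 chars
            String hex = new BigInteger(1, md5.digest("password".getBytes())).toString(16);
            while (hex.length() < 32) {
                hex = "0" + hex;
            }
            System.out.println(hex);   // 5f4dcc3b5aa765d61d8327deb882cf99
        }
    }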

View File

@ -303,32 +303,16 @@
</configuration>
</execution>
<execution>
<id>prefill-schema</id>
<id>prefill-developer-schema</id>
<phase>process-test-resources</phase>
<goals>
<goal>execute</goal>
</goals>
<configuration>
<sqlCommand>INSERT INTO `cloud`.`domain` (id, name,
parent, path, owner) VALUES (1, 'ROOT', NULL, '/',
2)</sqlCommand>
</configuration>
</execution>
<execution>
<id>prefill-configuration</id>
<phase>process-test-resources</phase>
<goals>
<goal>execute</goal>
</goals>
<configuration>
<sqlCommand>INSERT INTO `cloud`.`configuration`
(category, instance, component, name, value) VALUES
('Hidden', 'DEFAULT', 'management-server', 'init',
'false')</sqlCommand>
<sqlCommand>INSERT INTO `cloud`.`configuration`
(category, instance, component, name, value) VALUES
('Advanced', 'DEFAULT', 'management-server', 'integration.api.port',
'8096')</sqlCommand>
<autocommit>true</autocommit>
<srcFiles>
<srcFile>${basedir}/developer-prefill.sql</srcFile>
</srcFiles>
</configuration>
</execution>
</executions>

View File

@ -32,7 +32,7 @@
Anyone contributing to CloudStack should be on this mailing list.</para>
<para>You can also report bugs in CloudStack using the <ulink
url="https://issues.apache.org/jira/secure/CreateIssue!default.jspa">Apache Defect Tracking
System</ulink></para>
System</ulink>.</para>
<para>To posts to the lists, you'll need to be subscribed. See the <ulink
url="http://incubator.apache.org/cloudstack/mailing-lists.html">CloudStack Web site</ulink>
for instructions.</para>
@ -92,11 +92,11 @@
<tbody>
<row>
<entry><para>Name</para></entry>
<entry><para>systemvm-vmware-3.0.5</para></entry>
<entry><para>systemvm-vmware-3.0.0</para></entry>
</row>
<row>
<entry><para>Description</para></entry>
<entry><para>systemvm-vmware-3.0.5</para></entry>
<entry><para>systemvm-vmware-3.0.0</para></entry>
</row>
<row>
<entry><para>URL</para></entry>
@ -174,6 +174,7 @@
<programlisting><prompt>#</prompt> <command>apt-get</command> update
<prompt>#</prompt> <command>apt-get</command> upgrade cloud-*</programlisting>
</para>
<para>You will, of course, have to agree to the changes suggested by Yum or APT.</para>
<note>
<para>If the upgrade output includes a message similar to the following, then some
custom content was found in your old components.xml, and you need to merge the two
@ -559,8 +560,8 @@
</row>
<row>
<entry><para>VMware</para></entry>
<entry><para>Name: systemvm-vmware-3.0.5</para>
<para>Description: systemvm-vmware-3.0.5</para>
<entry><para>Name: systemvm-vmware-3.0.0</para>
<para>Description: systemvm-vmware-3.0.0</para>
<para>URL:
http://download.cloud.com/templates/burbank/burbank-systemvm-08012012.ova</para>
<para>Zone: Choose the zone where this hypervisor is used</para>
@ -619,6 +620,7 @@
<programlisting><prompt>#</prompt> <command>apt-get</command> update
<prompt>#</prompt> <command>apt-get</command> upgrade cloud-*</programlisting>
</para>
<para>You will, of course, have to agree to the changes suggested by Yum or APT.</para>
</listitem>
<listitem>
<para>If you have made changes to your existing copy of the file components.xml in your

View File

@ -42,8 +42,8 @@
</note>
<section id="configure-package-repository-deb">
<title>DEB package repository</title>
<para>You can add a DEB package repository to your apt sources with the following commands. Please note that currently only packages for Ubuntu 12.04 LTS (precise) are being build.</para>
<para>Use your preferred editor and open (or create) <filename>/etc/apt/sources.list.d/cloudstack</filename>. Add the community provided repository to the file:</para>
<para>You can add a DEB package repository to your apt sources with the following commands. Please note that only packages for Ubuntu 12.04 LTS (precise) are being built at this time.</para>
<para>Use your preferred editor and open (or create) <filename>/etc/apt/sources.list.d/cloudstack.list</filename>. Add the community provided repository to the file:</para>
<programlisting>deb http://cloudstack.apt-get.eu/ubuntu precise 4.0</programlisting>
<para>We now have to add the public key to the trusted keys.</para>
<programlisting language="Bash"><prompt>$</prompt> <command>wget</command> -O - http://cloudstack.apt-get.eu/release.asc|apt-key add -</programlisting>
@ -53,20 +53,17 @@
</section>
<section id="configure-package-repository-rpm">
<title>RPM package repository</title>
<para>If you're using an RPM-based system, you'll want to add the Yum repository so that you can install CloudStack with Yum.</para>
<para>There is a RPM package repository for &PRODUCT; so you can easily install on RHEL based platforms.</para>
<para>If you're using an RPM-based system, you'll want to add the Yum repository so that you can install &PRODUCT; with Yum.</para>
<para>Yum repository information is found under <filename>/etc/yum.repos.d</filename>. You'll see several <filename>.repo</filename> files in this directory, each one denoting a specific repository.</para>
<para>To add the CloudStack repository, visit the <ulink url="http://incubator.apache.org/cloudstack/downloads.html">downloads page</ulink> for the repository information. It will look something like this:</para>
<para>To add the &PRODUCT; repository, create <filename>/etc/yum.repos.d/cloudstack.repo</filename> and insert the following information.</para>
<programlisting>
[cloudstack]
name=cloudstack
baseurl=<replaceable>http://server.url/downloads/rpm/stable/</replaceable>
baseurl=<replaceable>http://cloudstack.apt-get.eu/rhel/4.0/</replaceable>
enabled=1
gpgcheck=1
gpgcheck=0
</programlisting>
<para>Next you'll want to add the GPG key:</para>
<screen>
<command>$ rpm --import http://server.url/downloads/RPM-GPG-KEY.txt</command>
</screen>
<para>Now you should be able to install CloudStack using Yum.</para>
</section>
</section>

View File

@ -28,7 +28,7 @@
<orderedlist>
<listitem><para>Open your favorite Web browser and go to this URL. Substitute the IP address of your own Management Server:</para>
<programlisting>http://&lt;management-server-ip-address&gt;:8080/client</programlisting>
<para>On a fresh Management Server installation, a guided tour splash screen appears. On later visits, you'll see a login screen where you can enter a user ID and password and proceed to your Dashboard.</para>
<para>After logging into a fresh Management Server installation, a guided tour splash screen appears. On later visits, you'll be taken directly into the Dashboard.</para>
</listitem>
<listitem><para>If you see the first-time splash screen, choose one of the following.</para>
<itemizedlist>

View File

@ -33,8 +33,9 @@
packages will depend on everything you need to run the Management server.</para>
<section id="vhd-util">
<title>Downloading vhd-util</title>
<para>This procedure is required only for installations where XenServer is installed on the hypervisor hosts.</para>
<para>Before setting up the Management Server, download vhd-util from <ulink
url="http://download.cloud.com.s3.amazonaws.com/tools/vhd-util">vhd-util</ulink></para>
url="http://download.cloud.com.s3.amazonaws.com/tools/vhd-util">vhd-util</ulink>.</para>
<para>If the Management Server is RHEL or CentOS, copy vhd-util to
/usr/lib64/cloud/common/scripts/vm/hypervisor/xenserver.</para>
<para>If the Management Server is Ubuntu, copy vhd-util to
@ -49,4 +50,4 @@
<title>Install on Ubuntu</title>
<programlisting language="Bash">apt-get install cloud-client</programlisting>
</section>
</section>
</section>

View File

@ -32,6 +32,7 @@ linkend="sect-source-buildrpm"/> or <xref linkend="sect-source-builddebs"/> as
appropriate. </para>
</listitem>
<listitem>
<para>This step is required only for installations where XenServer is installed on the hypervisor hosts.</para>
<para>Download vhd-util from <ulink
url="http://download.cloud.com.s3.amazonaws.com/tools/vhd-util">vhd-util</ulink></para>
<para>If the Management Server is RHEL or CentOS, copy vhd-util to
@ -41,21 +42,23 @@ linkend="sect-source-buildrpm"/> or <xref linkend="sect-source-builddebs"/> as
</listitem>
<listitem>
<para> Ensure that necessary services are started and set to start on boot.
<programlisting><prompt>#</prompt> <command>service</command> rpcbind start
<prompt>#</prompt> <command>service</command> nfs start
<prompt>#</prompt> <command>chkconfig</command> nfs on
<prompt>#</prompt> <command>chkconfig</command> rpcbind on </programlisting>
<programlisting><prompt>#</prompt> service rpcbind start
<prompt>#</prompt> service nfs start
<prompt>#</prompt> chkconfig nfs on
<prompt>#</prompt> chkconfig rpcbind on
</programlisting>
</para>
</listitem>
<listitem>
<para> Configure the database client. Note the absence of the --deploy-as argument in this
<para>Configure the database client. Note the absence of the --deploy-as argument in this
case. (For more details about the arguments to this command, see <xref
linkend="management-server-install-db-external"/>.) </para>
<programlisting><prompt>#</prompt> <command>cloud-setup-databases</command> cloud:<replaceable>dbpassword</replaceable>@<replaceable>dbhost</replaceable> -e <replaceable>encryption_type</replaceable> -m <replaceable>management_server_key</replaceable> -k <replaceable>database_key</replaceable> </programlisting>
<programlisting><prompt>#</prompt> cloud-setup-databases cloud:<replaceable>dbpassword</replaceable>@<replaceable>dbhost</replaceable> -e <replaceable>encryption_type</replaceable> -m <replaceable>management_server_key</replaceable> -k <replaceable>database_key</replaceable>
</programlisting>
</listitem>
<listitem>
<para>Configure the OS and start the Management Server:</para>
<programlisting><prompt>#</prompt> <command>cloud-setup-management</command></programlisting>
<programlisting><prompt>#</prompt> cloud-setup-management</programlisting>
<para>The Management Server on this node should now be running.</para>
</listitem>
<listitem>
@ -66,4 +69,4 @@ linkend="sect-source-buildrpm"/> or <xref linkend="sect-source-builddebs"/> as
Load Balancing.</para>
</listitem>
</orderedlist>
</section>
</section>

View File

@ -23,23 +23,21 @@
-->
<section id="management-server-installation-overview">
<title>Management Server Installation Overview</title>
<para>This section describes installing the Management Server. There are two slightly different installation flows, depending on how many Management Server nodes will be in your cloud:</para>
<itemizedlist>
<listitem><para>A single Management Server node, with MySQL on the same node.</para></listitem>
<listitem><para>Multiple Management Server nodes, with MySQL on a node separate from the Management Servers.</para></listitem>
</itemizedlist>
<para>In either case, each machine must meet the system requirements described in System Requirements.</para>
<warning><para>For the sake of security, be sure the public Internet can not access port 8096 or port 8250 on the Management Server.</para></warning>
<para>
The procedure for installing the Management Server is:
</para>
<orderedlist>
<title>Management Server Installation Overview</title>
<para>This section describes installing the Management Server. There are two slightly different installation flows, depending on how many Management Server nodes will be in your cloud:</para>
<itemizedlist>
<listitem><para>A single Management Server node, with MySQL on the same node.</para></listitem>
<listitem><para>Multiple Management Server nodes, with MySQL on a node separate from the Management Servers.</para></listitem>
</itemizedlist>
<para>In either case, each machine must meet the system requirements described in System Requirements.</para>
<warning><para>For the sake of security, be sure the public Internet can not access port 8096 or port 8250 on the Management Server.</para></warning>
<para>The procedure for installing the Management Server is:</para>
<orderedlist>
<listitem>
<para>Prepare the Operating System</para>
</listitem>
<listitem>
<para>Download and install vhd-util.</para>
<para>(XenServer only) Download and install vhd-util.</para>
</listitem>
<listitem><para>Install the First Management Server</para></listitem>
<listitem><para>Install and Configure the MySQL database</para></listitem>

View File

@ -27,7 +27,7 @@
<listitem><para>addNetworkServiceProvider</para></listitem>
<listitem>
<itemizedlist>
<listitem><para>name = "NiciraNVP"</para></listitem>
<listitem><para>name = "NiciraNvp"</para></listitem>
<listitem><para>physicalnetworkid = &lt;the uuid of the physical network&gt;</para></listitem>
</itemizedlist>
</listitem>

View File

@ -34,7 +34,7 @@
<row>
<entry><para>NetScaler ADC Type</para></entry>
<entry><para>Description of Capabilities</para></entry>
<entry><para>&PRODUCT; 3.0.3 Supported Features</para></entry>
<entry><para>&PRODUCT; Supported Features</para></entry>
</row>
</thead>
<tbody>

View File

@ -35,7 +35,7 @@
<listitem><para>First use vCenter to exit the vCenter maintenance mode.</para>
<para>This makes the host ready for &PRODUCT; to reactivate it.</para></listitem>
<listitem><para>Then use &PRODUCT;'s administrator UI to cancel the &PRODUCT; maintenance mode</para>
<para>When the host comes back online, the VMs that were migrated off of it are migrated back to it and new VMs can be added.</para></listitem>
<para>When the host comes back online, the VMs that were migrated off of it may be migrated back to it manually and new VMs can be added.</para></listitem>
</orderedlist></listitem>
</orderedlist>
</section>

View File

@ -54,9 +54,9 @@
<section id="sect-source-verify-md5">
<title>MD5</title>
<para>
In addition to the cryptographic signature, the &PRODUCT; provides a number
of cryptographic hashes to aid in assurance of validity of the downloaded
release. You can verify this hash by executing the following command:
In addition to the cryptographic signature, &PRODUCT; has an MD5 checksum
that you can use to verify the download matches the release.
You can verify this hash by executing the following command:
<programlisting><prompt>$</prompt> <command>gpg</command> --print-md MD5 apache-cloudstack-4.0.0-incubating-src.tar.bz2 | <command>diff</command> - apache-cloudstack-4.0.0-incubating-src.tar.bz2.md5</programlisting>
</para>
<para>

View File

@ -0,0 +1,98 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# chkconfig: 35 99 10
# description: Cloud Agent
# WARNING: if this script is changed, then all other initscripts MUST BE changed to match it as well
. /etc/rc.d/init.d/functions
whatami=cloud-external-ipallocator
# set environment variables
SHORTNAME="$whatami"
PIDFILE=/var/run/"$whatami".pid
LOCKFILE=/var/lock/subsys/"$SHORTNAME"
LOGFILE=/var/log/cloud/ipallocator/ipallocator.log
PROGNAME="External IPAllocator"
unset OPTIONS
[ -r /etc/sysconfig/"$SHORTNAME" ] && source /etc/sysconfig/"$SHORTNAME"
DAEMONIZE=/usr/bin/cloud-daemonize
PROG=/usr/bin/cloud-external-ipallocator.py
OPTIONS=8083
start() {
echo -n $"Starting $PROGNAME: "
if hostname --fqdn >/dev/null 2>&1 ; then
daemon --check=$SHORTNAME --pidfile=${PIDFILE} "$DAEMONIZE" \
-n "$SHORTNAME" -p "$PIDFILE" -l "$LOGFILE" "$PROG" $OPTIONS
RETVAL=$?
echo
else
failure
echo
echo The host name does not resolve properly to an IP address. Cannot start "$PROGNAME". > /dev/stderr
RETVAL=9
fi
[ $RETVAL = 0 ] && touch ${LOCKFILE}
return $RETVAL
}
stop() {
echo -n $"Stopping $PROGNAME: "
killproc -p ${PIDFILE} $SHORTNAME # -d 10 $SHORTNAME
RETVAL=$?
echo
[ $RETVAL = 0 ] && rm -f ${LOCKFILE} ${PIDFILE}
}
# See how we were called.
case "$1" in
start)
start
;;
stop)
stop
;;
status)
status -p ${PIDFILE} $SHORTNAME
RETVAL=$?
;;
restart)
stop
sleep 3
start
;;
condrestart)
if status -p ${PIDFILE} $SHORTNAME >&/dev/null; then
stop
sleep 3
start
fi
;;
*)
echo $"Usage: $whatami {start|stop|restart|condrestart|status|help}"
RETVAL=3
esac
exit $RETVAL

View File

@ -0,0 +1,107 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# cloud-management This shell script takes care of starting and stopping Tomcat
#
# chkconfig: - 80 20
#
### BEGIN INIT INFO
# Provides: tomcat6
# Required-Start: $network $syslog
# Required-Stop: $network $syslog
# Default-Start:
# Default-Stop:
# Description: Release implementation for Servlet 2.5 and JSP 2.1
# Short-Description: start and stop tomcat
### END INIT INFO
#
# - originally written by Henri Gomez, Keith Irwin, and Nicolas Mailhot
# - heavily rewritten by Deepak Bhole and Jason Corley
#
if [ -r /etc/rc.d/init.d/functions ]; then
. /etc/rc.d/init.d/functions
fi
if [ -r /lib/lsb/init-functions ]; then
. /lib/lsb/init-functions
fi
NAME="$(basename $0)"
stop() {
SHUTDOWN_WAIT="30"
count="0"
if [ -f /var/run/cloud-management.pid ]; then
pid=`cat /var/run/cloud-management.pid`
kill $pid &>/dev/null
until [ "$(ps --pid $pid | grep -c $pid)" -eq "0" ] || \
[ "$count" -gt "$SHUTDOWN_WAIT" ]
do
sleep 1
let count="${count}+1"
done
if [ "$(ps --pid $pid | grep -c $pid)" -eq "0" ]; then
log_success_msg "Stopping cloud-management:"
else
log_failure_msg "Stopping cloud-management:"
fi
else
echo "Cannot find PID file of Cloud-management"
log_failure_msg "Stopping cloud-management:"
fi
}
set_ulimit() {
fd_limit=`ulimit -n`
if [ "$fd_limit" != "4096" ]; then
user=`whoami`
if [ $user == "root" ]; then
ulimit -n 4096
fi
fi
}
handle_pid_file() {
if [ "$1" -ne 0 ] ; then
echo "The pid file locates at /var/run/cloud-management.pid and lock file at /var/lock/subsys/cloud-management.
Starting cloud-management will take care of them or you can manually clean up."
fi
}
# See how we were called.
case "$1" in
status)
status ${NAME}
RETVAL=$?
handle_pid_file $RETVAL
;;
stop)
stop
;;
restart)
stop
set start
set_ulimit
. /etc/rc.d/init.d/tomcat6
;;
*)
set_ulimit
. /etc/rc.d/init.d/tomcat6
esac
exit $RETVAL

View File

@ -0,0 +1,23 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# This file is loaded in /etc/init.d/vmopsmanagement
# ATM we only do two things here:
dummy=1 ; export TOMCAT_CFG=/etc/cloud/management/tomcat6.conf ; . /etc/cloud/management/tomcat6.conf
#--------------------------

View File

@ -0,0 +1,278 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
%define __os_install_post %{nil}
%global debug_package %{nil}
# DISABLE the post-percentinstall java repacking and line number stripping
# we need to find a way to just disable the java repacking and line number stripping, but not the autodeps
Name: cloud
Summary: CloudStack IaaS Platform
#http://fedoraproject.org/wiki/PackageNamingGuidelines#Pre-Release_packages
%if "%{?_prerelease}" != ""
%define _maventag %{_ver}-SNAPSHOT
Release: %{_rel}%{dist}
%else
%define _maventag %{_ver}
Release: %{_rel}%{dist}
%endif
Version: %{_ver}
License: Apache License 2.0
Vendor: Apache CloudStack <cloudstack-dev@incubator.apache.org>
Packager: Apache CloudStack <cloudstack-dev@incubator.apache.org>
Group: System Environment/Libraries
# FIXME do groups for every single one of the subpackages
Source0: %{name}-%{_maventag}.tgz
BuildRoot: %{_tmppath}/%{name}-%{_maventag}-%{release}-build
BuildRequires: java-1.6.0-openjdk-devel
BuildRequires: tomcat6
BuildRequires: ws-commons-util
BuildRequires: jpackage-utils
BuildRequires: gcc
BuildRequires: glibc-devel
BuildRequires: /usr/bin/mkisofs
BuildRequires: MySQL-python
#BuildRequires: maven => 3.0.0
%description
CloudStack is a highly scalable, elastic, open source,
intelligent IaaS cloud implementation.
%package management-server
Summary: CloudStack management server UI
Requires: tomcat6
Requires: java >= 1.6.0
Requires: python
Requires: bash
Requires: bzip2
Requires: gzip
Requires: unzip
Requires: /sbin/mount.nfs
Requires: openssh-clients
Requires: nfs-utils
Requires: wget
Requires: mysql-connector-java
Requires: ws-commons-util
Requires: jpackage-utils
Requires: sudo
Requires: /sbin/service
Requires: /sbin/chkconfig
Requires: /usr/bin/ssh-keygen
Requires: mkisofs
Requires: MySQL-python
Requires: python-paramiko
Requires: ipmitool
Requires: %{name}-setup = %{_ver}
Group: System Environment/Libraries
%description management-server
The CloudStack management server is the central point of coordination,
management, and intelligence in CloudStack.
%package setup
Summary: CloudStack database setup scripts
Requires: java >= 1.6.0
Requires: python
Requires: MySQL-python
Requires: %{name}-python = %{_ver}
Group: System Environment/Libraries
%description setup
The scripts and commands used to setup and configure the database
%package python
Summary: CloudStack Python library
# FIXME nuke the archdependency
Requires: python
Group: System Environment/Libraries
%description python
The CloudStack Python library contains a few Python modules that
CloudStack uses.
%prep
echo Doing CloudStack build
%setup -q -n %{name}-%{_maventag}
%build
# this fixes the /usr/com bug on centos5
%define _localstatedir /var
%define _sharedstatedir /var/lib
cp packaging/centos63/replace.properties build/replace.properties
echo VERSION=%{_maventag} >> build/replace.properties
echo PACKAGE=%{name} >> build/replace.properties
mvn package -Dsystemvm
%install
[ ${RPM_BUILD_ROOT} != "/" ] && rm -rf ${RPM_BUILD_ROOT}
mkdir -p ${RPM_BUILD_ROOT}%{_bindir}
mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}/setup
mkdir -p ${RPM_BUILD_ROOT}/usr/share/%{name}/management/
ln -sf /usr/share/tomcat6/bin ${RPM_BUILD_ROOT}/usr/share/%{name}/management/bin
ln -sf /etc/cloud/management ${RPM_BUILD_ROOT}/usr/share/%{name}/management/conf
ln -sf /usr/share/tomcat6/lib ${RPM_BUILD_ROOT}/usr/share/%{name}/management/lib
ln -sf /var/log/cloud/management ${RPM_BUILD_ROOT}/usr/share/%{name}/management/logs
ln -sf /var/cache/cloud/management/temp ${RPM_BUILD_ROOT}/usr/share/%{name}/management/temp
ln -sf /var/cache/cloud/management/work ${RPM_BUILD_ROOT}/usr/share/%{name}/management/work
mkdir -p ${RPM_BUILD_ROOT}/usr/share/%{name}/management/webapps/client
mkdir -p ${RPM_BUILD_ROOT}/var/log/%{name}/management
mkdir -p ${RPM_BUILD_ROOT}/var/log/%{name}/agent
mkdir -p ${RPM_BUILD_ROOT}/var/log/%{name}/awsapi
mkdir -p ${RPM_BUILD_ROOT}/var/log/%{name}/ipallocator
mkdir -p ${RPM_BUILD_ROOT}/var/cache/%{name}/management/work
mkdir -p ${RPM_BUILD_ROOT}/var/cache/%{name}/management/temp
mkdir -p ${RPM_BUILD_ROOT}/var/lib/%{name}/mnt
mkdir -p ${RPM_BUILD_ROOT}/var/lib/%{name}/management
mkdir -p ${RPM_BUILD_ROOT}/etc/%{name}/management
mkdir -p ${RPM_BUILD_ROOT}/etc/%{name}/management/Catalina/localhost/client
mkdir -p ${RPM_BUILD_ROOT}/etc/rc.d/init.d
mkdir -p ${RPM_BUILD_ROOT}/etc/sysconfig
mkdir -p ${RPM_BUILD_ROOT}/etc/%{name}/management/Catalina/localhost/client
install -D client/target/utilities/bin/* ${RPM_BUILD_ROOT}%{_bindir}
install -D console-proxy/dist/systemvm.iso ${RPM_BUILD_ROOT}/usr/share/%{name}/management/webapps/client/WEB-INF/classes/vms/systemvm.iso
install -D console-proxy/dist/systemvm.zip ${RPM_BUILD_ROOT}/usr/share/%{name}/management/webapps/client/WEB-INF/classes/vms/systemvm.zip
cp -r client/target/utilities/scripts/db/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}/setup
cp -r client/target/cloud-client-ui-4.1.0-SNAPSHOT/* ${RPM_BUILD_ROOT}/usr/share/%{name}/management/webapps/client
for name in db.properties log4j-cloud.xml tomcat6-nonssl.conf tomcat6-ssl.conf server-ssl.xml server-nonssl.xml \
catalina.policy catalina.properties db-enc.properties classpath.conf tomcat-users.xml web.xml ; do
mv ${RPM_BUILD_ROOT}/usr/share/%{name}/management/webapps/client/WEB-INF/classes/$name \
${RPM_BUILD_ROOT}/etc/%{name}/management/$name
done
mv ${RPM_BUILD_ROOT}/usr/share/%{name}/management/webapps/client/WEB-INF/classes/context.xml \
${RPM_BUILD_ROOT}/etc/%{name}/management/Catalina/localhost/client
mkdir -p ${RPM_BUILD_ROOT}/usr/lib/python2.6/site-packages/
cp -r python/lib/cloudutils ${RPM_BUILD_ROOT}/usr/lib/python2.6/site-packages/
cp -r cloud-cli/cloudtool ${RPM_BUILD_ROOT}/usr/lib/python2.6/site-packages/
install python/lib/cloud_utils.py ${RPM_BUILD_ROOT}/usr/lib/python2.6/site-packages/cloud_utils.py
install cloud-cli/cloudapis/cloud.py ${RPM_BUILD_ROOT}/usr/lib/python2.6/site-packages/cloudapis.py
install python/bindir/cloud-external-ipallocator.py ${RPM_BUILD_ROOT}%{_bindir}/
install -D client/target/pythonlibs/jasypt-1.9.0.jar ${RPM_BUILD_ROOT}%{_javadir}/jasypt-1.9.0.jar
install -D client/target/pythonlibs/jasypt-1.8.jar ${RPM_BUILD_ROOT}%{_javadir}/jasypt-1.8.jar
install -D packaging/centos63/cloud-ipallocator.rc ${RPM_BUILD_ROOT}/etc/rc.d/init.d/%{name}-ipallocator
install -D packaging/centos63/cloud-management.rc ${RPM_BUILD_ROOT}/etc/rc.d/init.d/%{name}-management
install -D packaging/centos63/cloud-management.sysconfig ${RPM_BUILD_ROOT}/etc/sysconfig/%{name}-management
chmod 770 ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/management/Catalina
chmod 770 ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/management/Catalina/localhost
chmod 770 ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/management/Catalina/localhost/client
chmod 770 ${RPM_BUILD_ROOT}%{_sharedstatedir}/%{name}/mnt
chmod 770 ${RPM_BUILD_ROOT}%{_sharedstatedir}/%{name}/management
chmod 770 ${RPM_BUILD_ROOT}%{_localstatedir}/cache/%{name}/management/work
chmod 770 ${RPM_BUILD_ROOT}%{_localstatedir}/cache/%{name}/management/temp
chmod 770 ${RPM_BUILD_ROOT}%{_localstatedir}/log/%{name}/management
chmod 770 ${RPM_BUILD_ROOT}%{_localstatedir}/log/%{name}/agent
chmod -R ugo+x ${RPM_BUILD_ROOT}/usr/share/%{name}/management/webapps/client/WEB-INF/classes/scripts
%clean
[ ${RPM_BUILD_ROOT} != "/" ] && rm -rf ${RPM_BUILD_ROOT}
%preun management-server
/sbin/service %{name}-management stop || true
if [ "$1" == "0" ] ; then
/sbin/chkconfig --del %{name}-management > /dev/null 2>&1 || true
/sbin/service %{name}-management stop > /dev/null 2>&1 || true
fi
%pre management-server
id %{name} > /dev/null 2>&1 || /usr/sbin/useradd -M -c "CloudStack unprivileged user" \
-r -s /bin/sh -d %{_sharedstatedir}/%{name}/management %{name}|| true
# set max file descriptors for cloud user to 4096
sed -i /"cloud hard nofile"/d /etc/security/limits.conf
sed -i /"cloud soft nofile"/d /etc/security/limits.conf
echo "cloud hard nofile 4096" >> /etc/security/limits.conf
echo "cloud soft nofile 4096" >> /etc/security/limits.conf
rm -rf %{_localstatedir}/cache/%{name}
# user hardcoded here, also hardcoded in wscript
%post management-server
if [ "$1" == "1" ] ; then
/sbin/chkconfig --add %{name}-management > /dev/null 2>&1 || true
/sbin/chkconfig --level 345 %{name}-management on > /dev/null 2>&1 || true
fi
if [ ! -f %{_datadir}/%{name}/management/webapps/client/WEB-INF/classes/scripts/vm/hypervisor/xenserver/vhd-util ] ; then
echo Please download vhd-util from http://download.cloud.com.s3.amazonaws.com/tools/vhd-util and put it in
echo %{_datadir}/%{name}/management/webapps/client/WEB-INF/classes/scripts/vm/hypervisor/xenserver/
fi
#No default permission as the permission setup is complex
%files management-server
%defattr(-,root,root,-)
%doc LICENSE
%doc NOTICE
%dir %attr(0770,root,%{name}) %{_sysconfdir}/%{name}/management/Catalina
%dir %attr(0770,root,%{name}) %{_sysconfdir}/%{name}/management/Catalina/localhost
%dir %attr(0770,root,%{name}) %{_sysconfdir}/%{name}/management/Catalina/localhost/client
%dir %{_datadir}/%{name}/management
%dir %attr(0770,root,%{name}) %{_sharedstatedir}/%{name}/mnt
%dir %attr(0770,%{name},%{name}) %{_sharedstatedir}/%{name}/management
%dir %attr(0770,root,%{name}) %{_localstatedir}/cache/%{name}/management
%dir %attr(0770,root,%{name}) %{_localstatedir}/cache/%{name}/management/work
%dir %attr(0770,root,%{name}) %{_localstatedir}/cache/%{name}/management/temp
%dir %attr(0770,root,%{name}) %{_localstatedir}/log/%{name}/management
%dir %attr(0770,root,%{name}) %{_localstatedir}/log/%{name}/agent
%config(noreplace) %{_sysconfdir}/sysconfig/%{name}-management
%config(noreplace) %{_sysconfdir}/%{name}/management
%config(noreplace) %attr(0640,root,%{name}) %{_sysconfdir}/%{name}/management/db.properties
%config(noreplace) %{_sysconfdir}/%{name}/management/log4j-%{name}.xml
%config(noreplace) %{_sysconfdir}/%{name}/management/tomcat6-nonssl.conf
%config(noreplace) %{_sysconfdir}/%{name}/management/tomcat6-ssl.conf
%attr(0755,root,root) %{_initrddir}/%{name}-management
%attr(0755,root,root) %{_bindir}/%{name}-setup-management
%attr(0755,root,root) %{_bindir}/%{name}-update-xenserver-licenses
%{_datadir}/%{name}/management/*
%files setup
%attr(0755,root,root) %{_bindir}/%{name}-setup-databases
%attr(0755,root,root) %{_bindir}/%{name}-migrate-databases
%attr(0755,root,root) %{_bindir}/%{name}-set-guest-password
%attr(0755,root,root) %{_bindir}/%{name}-set-guest-sshkey
%attr(0755,root,root) %{_bindir}/%{name}-sysvmadm
%attr(0755,root,root) %{_bindir}/%{name}-setup-encryption
%dir %{_datadir}/%{name}/setup
%{_datadir}/%{name}/setup/*.sql
%{_datadir}/%{name}/setup/db/*.sql
%{_datadir}/%{name}/setup/*.sh
%{_datadir}/%{name}/setup/server-setup.xml
%{_javadir}/jasypt-1.9.0.jar
%{_javadir}/jasypt-1.8.jar
%doc LICENSE
%doc NOTICE
%files python
%defattr(0644,root,root,0755)
%{_prefix}/lib*/python*/site-packages/%{name}*
%attr(0755,root,root) %{_bindir}/cloud-external-ipallocator.py
%attr(0755,root,root) %{_initrddir}/cloud-ipallocator
%dir %attr(0770,root,root) %{_localstatedir}/log/%{name}/ipallocator
%doc LICENSE
%doc NOTICE
%changelog
* Fri Oct 03 2012 Hugo Trippaers <hugo@apache.org> 4.1.0
- new style spec file

View File

@ -0,0 +1,45 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
CWD=`pwd`
RPMDIR=$CWD/../../dist/rpmbuild
VERSION=`(cd ../../; mvn org.apache.maven.plugins:maven-help-plugin:2.1.1:evaluate -Dexpression=project.version) | grep -v '^\['`
if echo $VERSION | grep SNAPSHOT ; then
REALVER=`echo $VERSION | cut -d '-' -f 1`
DEFVER="-D_ver $REALVER"
DEFPRE="-D_prerelease 1"
DEFREL="-D_rel SNAPSHOT"
else
DEFVER="-D_ver $REALVER"
DEFPRE=
DEFREL=
fi
mkdir -p $RPMDIR/SPECS
mkdir -p $RPMDIR/SOURCES/cloud-$VERSION
(cd ../../; tar -c --exclude .git --exclude dist . | tar -C $RPMDIR/SOURCES/cloud-$VERSION -x )
(cd $RPMDIR/SOURCES/; tar -czf cloud-$VERSION.tgz cloud-$VERSION)
cp cloud.spec $RPMDIR/SPECS
(cd $RPMDIR; rpmbuild -ba SPECS/cloud.spec "-D_topdir $RPMDIR" "$DEFVER" "$DEFREL" "$DEFPRE" )

View File

@ -0,0 +1,61 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
DBUSER=cloud
DBPW=cloud
DBROOTPW=
MSLOG=vmops.log
APISERVERLOG=api.log
DBHOST=localhost
MSMNTDIR=/mnt
COMPONENTS-SPEC=components-premium.xml
AWSAPILOG=awsapi.log
REMOTEHOST=localhost
AGENTCLASSPATH=
AGENTLOG=/var/log/cloud/agent/agent.log
AGENTLOGDIR=/var/log/cloud/agent/
AGENTSYSCONFDIR=/etc/cloud/agent
APISERVERLOG=/var/log/cloud/management/apilog.log
AWSAPILOG=/var/log/cloud/awsapi/awsapi.log
BINDIR=/usr/bin
COMMONLIBDIR=/usr/share/java
CONFIGUREVARS=
DEPSCLASSPATH=
DOCDIR=
IPALOCATORLOG=/var/log/cloud/management/ipallocator.log
JAVADIR=/usr/share/java
LIBEXECDIR=/usr/libexec
LOCKDIR=/var/lock
MSCLASSPATH=
MSCONF=/etc/cloud/management
MSENVIRON=/usr/share/cloud/management
MSLOG=/var/log/cloud/management/management-server.log
MSLOGDIR=/var/log/cloud/management/
MSMNTDIR=/var/lib/cloud/mnt
MSUSER=cloud
PIDDIR=/var/run
PLUGINJAVADIR=
PREMIUMJAVADIR=
PYTHONDIR=/usr/lib/python2.6/site-packages/
SERVERSYSCONFDIR=/etc/sysconfig
SETUPDATADIR=/usr/share/cloud/setup
SYSCONFDIR=/etc/sysconfig
SYSTEMCLASSPATH=
SYSTEMJARS=
USAGECLASSPATH=
USAGELOG=/var/log/cloud/usage
USAGESYSCONFDIR=/etc/sysconfig

View File

@ -597,6 +597,11 @@ setup_vpcrouter() {
setup_vmware_extra_nics
fi
if [ -f /etc/hosts ]; then
host=`hostname -s`;
grep -q $host /etc/hosts || echo "127.0.0.1 $host" >> /etc/hosts;
fi
cat > /etc/network/interfaces << EOF
auto lo $1
iface lo inet loopback

View File

@ -45,6 +45,7 @@
<module>user-authenticators/ldap</module>
<module>user-authenticators/md5</module>
<module>user-authenticators/plain-text</module>
<module>user-authenticators/sha256salted</module>
</modules>
<dependencies>

View File

@ -15,6 +15,8 @@
//
package com.cloud.server.auth;
import java.security.NoSuchAlgorithmException;
import java.security.SecureRandom;
import java.util.HashMap;
import java.util.Hashtable;
import java.util.Map;
@ -31,6 +33,7 @@ import javax.naming.directory.SearchControls;
import javax.naming.directory.SearchResult;
import org.apache.log4j.Logger;
import org.bouncycastle.util.encoders.Base64;
import com.cloud.api.ApiConstants.LDAPParams;
import com.cloud.configuration.Config;
@ -40,6 +43,7 @@ import com.cloud.user.UserAccount;
import com.cloud.user.dao.UserAccountDao;
import com.cloud.utils.component.ComponentLocator;
import com.cloud.utils.crypt.DBEncryptionUtil;
import com.cloud.utils.exception.CloudRuntimeException;
@Local(value={UserAuthenticator.class})
@ -159,4 +163,17 @@ public class LDAPUserAuthenticator extends DefaultUserAuthenticator {
_userAccountDao = locator.getDao(UserAccountDao.class);
return true;
}
@Override
public String encode(String password) {
// Password is not used, so set to a random string
try {
SecureRandom randomGen = SecureRandom.getInstance("SHA1PRNG");
byte bytes[] = new byte[20];
randomGen.nextBytes(bytes);
return new String(Base64.encode(bytes)); // Base64.encode returns byte[], so wrap it in a String
} catch (NoSuchAlgorithmException e) {
throw new CloudRuntimeException("Failed to generate random password",e);
}
}
}

View File

@ -15,6 +15,9 @@
package com.cloud.server.auth;
import java.math.BigInteger;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Map;
import javax.ejb.Local;
@ -26,6 +29,7 @@ import com.cloud.server.ManagementServer;
import com.cloud.user.UserAccount;
import com.cloud.user.dao.UserAccountDao;
import com.cloud.utils.component.ComponentLocator;
import com.cloud.utils.exception.CloudRuntimeException;
/**
* Simple UserAuthenticator that performs a MD5 hash of the password before
@ -49,31 +53,7 @@ public class MD5UserAuthenticator extends DefaultUserAuthenticator {
return false;
}
/**
MessageDigest md5;
try {
md5 = MessageDigest.getInstance("MD5");
} catch (NoSuchAlgorithmException e) {
throw new CloudRuntimeException("Error", e);
}
md5.reset();
BigInteger pwInt = new BigInteger(1, md5.digest(password.getBytes()));
// make sure our MD5 hash value is 32 digits long...
StringBuffer sb = new StringBuffer();
String pwStr = pwInt.toString(16);
int padding = 32 - pwStr.length();
for (int i = 0; i < padding; i++) {
sb.append('0');
}
sb.append(pwStr);
**/
// Will: The MD5Authenticator is now a straight pass-through comparison of the
// passwords because we will not assume that the password passed in has
// already been MD5 hashed. I am keeping the above code in case this requirement changes
// or people need examples of how to MD5 hash passwords in java.
if (!user.getPassword().equals(password)) {
if (!user.getPassword().equals(encode(password))) {
s_logger.debug("Password does not match");
return false;
}
@ -87,4 +67,25 @@ public class MD5UserAuthenticator extends DefaultUserAuthenticator {
_userAccountDao = locator.getDao(UserAccountDao.class);
return true;
}
@Override
public String encode(String password) {
MessageDigest md5 = null;
try {
md5 = MessageDigest.getInstance("MD5");
} catch (NoSuchAlgorithmException e) {
throw new CloudRuntimeException("Unable to hash password", e);
}
md5.reset();
BigInteger pwInt = new BigInteger(1, md5.digest(password.getBytes()));
String pwStr = pwInt.toString(16);
int padding = 32 - pwStr.length();
StringBuffer sb = new StringBuffer();
for (int i = 0; i < padding; i++) {
sb.append('0'); // make sure the MD5 password is 32 digits long
}
sb.append(pwStr);
return sb.toString();
}
}

View File

@ -87,4 +87,10 @@ public class PlainTextUserAuthenticator extends DefaultUserAuthenticator {
_userAccountDao = locator.getDao(UserAccountDao.class);
return true;
}
@Override
public String encode(String password) {
// Plaintext so no encoding at all
return password;
}
}

View File

@ -0,0 +1,29 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>cloud-plugin-user-authenticator-sha256salted</artifactId>
<name>Apache CloudStack Plugin - User Authenticator SHA256 Salted</name>
<parent>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloudstack-plugins</artifactId>
<version>4.1.0-SNAPSHOT</version>
<relativePath>../../pom.xml</relativePath>
</parent>
</project>

View File

@ -0,0 +1,122 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.server.auth;
import java.io.UnsupportedEncodingException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.security.SecureRandom;
import java.util.Map;
import javax.ejb.Local;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import org.bouncycastle.util.encoders.Base64;
import com.cloud.server.ManagementServer;
import com.cloud.servlet.CloudStartupServlet;
import com.cloud.user.UserAccount;
import com.cloud.user.dao.UserAccountDao;
import com.cloud.utils.component.ComponentLocator;
import com.cloud.utils.component.Inject;
import com.cloud.utils.exception.CloudRuntimeException;
@Local(value={UserAuthenticator.class})
public class SHA256SaltedUserAuthenticator extends DefaultUserAuthenticator {
public static final Logger s_logger = Logger.getLogger(SHA256SaltedUserAuthenticator.class);
@Inject
private UserAccountDao _userAccountDao;
private static int s_saltlen = 20;
public boolean configure(String name, Map<String, Object> params)
throws ConfigurationException {
super.configure(name, params);
return true;
}
/* (non-Javadoc)
* @see com.cloud.server.auth.UserAuthenticator#authenticate(java.lang.String, java.lang.String, java.lang.Long, java.util.Map)
*/
@Override
public boolean authenticate(String username, String password,
Long domainId, Map<String, Object[]> requestParameters) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Retrieving user: " + username);
}
UserAccount user = _userAccountDao.getUserAccount(username, domainId);
if (user == null) {
s_logger.debug("Unable to find user with " + username + " in domain " + domainId);
return false;
}
try {
String storedPassword[] = user.getPassword().split(":");
if (storedPassword.length != 2) {
s_logger.warn("The stored password for " + username + " isn't in the right format for this authenticator");
return false;
}
byte salt[] = Base64.decode(storedPassword[0]);
String hashedPassword = encode(password, salt);
return storedPassword[1].equals(hashedPassword);
} catch (NoSuchAlgorithmException e) {
throw new CloudRuntimeException("Unable to hash password", e);
} catch (UnsupportedEncodingException e) {
throw new CloudRuntimeException("Unable to hash password", e);
}
}
/* (non-Javadoc)
* @see com.cloud.server.auth.UserAuthenticator#encode(java.lang.String)
*/
@Override
public String encode(String password) {
// 1. Generate the salt
SecureRandom randomGen;
try {
randomGen = SecureRandom.getInstance("SHA1PRNG");
byte salt[] = new byte[s_saltlen];
randomGen.nextBytes(salt);
String saltString = new String(Base64.encode(salt));
String hashString = encode(password, salt);
// 3. concatenate the two and return
return saltString + ":" + hashString;
} catch (NoSuchAlgorithmException e) {
throw new CloudRuntimeException("Unable to hash password", e);
} catch (UnsupportedEncodingException e) {
throw new CloudRuntimeException("Unable to hash password", e);
}
}
public String encode(String password, byte[] salt) throws UnsupportedEncodingException, NoSuchAlgorithmException {
byte[] passwordBytes = password.getBytes("UTF-8");
byte[] hashSource = new byte[passwordBytes.length + s_saltlen];
System.arraycopy(passwordBytes, 0, hashSource, 0, passwordBytes.length);
System.arraycopy(salt, 0, hashSource, passwordBytes.length, s_saltlen);
// 2. Hash the password with the salt
MessageDigest md = MessageDigest.getInstance("SHA-256");
md.update(hashSource);
byte[] digest = md.digest();
return new String(Base64.encode(digest));
}
}
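For reference, the credential this authenticator stores is two Base64 strings joined by a colon: the random salt, then the SHA-256 digest of the UTF-8 password bytes followed by the salt. The sketch below reproduces the same scheme with JDK classes only (java.util.Base64 in place of the BouncyCastle encoder used above); the class and method names are illustrative and are not part of this change.

// Minimal standalone sketch of the salt:hash scheme, assuming only the JDK.
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.SecureRandom;
import java.util.Base64;

public class SaltedSha256Sketch {
    private static final int SALT_LEN = 20;

    // Produces "base64(salt):base64(sha256(password || salt))".
    static String encode(String password) throws Exception {
        byte[] salt = new byte[SALT_LEN];
        SecureRandom.getInstance("SHA1PRNG").nextBytes(salt);
        return Base64.getEncoder().encodeToString(salt) + ":" + hash(password, salt);
    }

    // Recomputes the hash with the stored salt and compares.
    static boolean verify(String password, String stored) throws Exception {
        String[] parts = stored.split(":");
        if (parts.length != 2) {
            return false;
        }
        byte[] salt = Base64.getDecoder().decode(parts[0]);
        return parts[1].equals(hash(password, salt));
    }

    private static String hash(String password, byte[] salt) throws Exception {
        byte[] pw = password.getBytes(StandardCharsets.UTF_8);
        byte[] source = new byte[pw.length + salt.length];
        System.arraycopy(pw, 0, source, 0, pw.length);
        System.arraycopy(salt, 0, source, pw.length, salt.length);
        MessageDigest md = MessageDigest.getInstance("SHA-256");
        return Base64.getEncoder().encodeToString(md.digest(source));
    }

    public static void main(String[] args) throws Exception {
        String stored = encode("password");
        System.out.println(stored + " -> " + verify("password", stored)); // expect true
    }
}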


@ -0,0 +1,63 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package src.com.cloud.server.auth.test;
import static org.junit.Assert.*;
import java.io.UnsupportedEncodingException;
import java.security.NoSuchAlgorithmException;
import java.util.Collections;
import javax.naming.ConfigurationException;
import org.bouncycastle.util.encoders.Base64;
import org.junit.Before;
import org.junit.Test;
import com.cloud.server.auth.SHA256SaltedUserAuthenticator;
public class AuthenticatorTest {
@Before
public void setUp() throws Exception {
}
@Test
public void testEncode() throws UnsupportedEncodingException, NoSuchAlgorithmException {
SHA256SaltedUserAuthenticator authenticator =
new SHA256SaltedUserAuthenticator();
try {
authenticator.configure("SHA256", Collections.<String,Object>emptyMap());
} catch (ConfigurationException e) {
fail(e.toString());
}
String encodedPassword = authenticator.encode("password");
String storedPassword[] = encodedPassword.split(":");
assertEquals ("hash must consist of two components", storedPassword.length, 2);
byte salt[] = Base64.decode(storedPassword[0]);
String hashedPassword = authenticator.encode("password", salt);
assertEquals("compare hashes", storedPassword[1], hashedPassword);
}
}


@ -81,7 +81,6 @@
<cs.jstl.version>1.2</cs.jstl.version>
<cs.selenium.server.version>1.0-20081010.060147</cs.selenium.server.version>
<cs.vmware.api.version>4.1</cs.vmware.api.version>
<skipTests>true</skipTests>
</properties>
@ -343,9 +342,10 @@
<id>developer</id>
<modules>
<module>developer</module>
<module>tools/apidoc</module>
<module>tools/devcloud</module>
<module>tools/marvin</module>
<module>tools/apidoc</module>
<module>tools/devcloud</module>
<module>tools/marvin</module>
<module>tools/cli</module>
</modules>
</profile>
<profile>


@ -91,8 +91,15 @@
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<argLine>-Xmx1024m</argLine>
<excludes>
<exclude>com/cloud/upgrade/*</exclude>
<exclude>com/cloud/async/*</exclude>
<exclude>com/cloud/cluster/*</exclude>
<exclude>com/cloud/snapshot/*</exclude>
<exclude>com/cloud/storage/dao/*</exclude>
<exclude>com/cloud/vm/dao/*</exclude>
<exclude>com/cloud/vpc/*</exclude>
</excludes>
</configuration>
</plugin>


@ -219,6 +219,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
protected AgentMonitor _monitor = null;
protected ExecutorService _executor;
protected ThreadPoolExecutor _connectExecutor;
protected StateMachine2<Status, Status.Event, Host> _statusStateMachine = Status.getStateMachine();
@ -274,7 +275,12 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
registerForHostEvents(_monitor, true, true, false);
_executor = new ThreadPoolExecutor(threads, threads, 60l, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory("AgentTaskPool"));
_connectExecutor = new ThreadPoolExecutor(100, 500, 60l, TimeUnit.SECONDS,
new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory("AgentConnectTaskPool"));
//allow core threads to time out even when there are no items in the queue
_connectExecutor.allowCoreThreadTimeOut(true);
_connection = new NioServer("AgentManager", _port, workers + 10, this);
s_logger.info("Listening on " + _port + " with " + workers + " workers");
@ -608,19 +614,19 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
ConnectionException ce = (ConnectionException)e;
if (ce.isSetupError()) {
s_logger.warn("Monitor " + monitor.second().getClass().getSimpleName() + " says there is an error in the connect process for " + hostId + " due to " + e.getMessage());
handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected);
handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true);
throw ce;
} else {
s_logger.info("Monitor " + monitor.second().getClass().getSimpleName() + " says not to continue the connect process for " + hostId + " due to " + e.getMessage());
handleDisconnectWithoutInvestigation(attache, Event.ShutdownRequested);
handleDisconnectWithoutInvestigation(attache, Event.ShutdownRequested, true);
return attache;
}
} else if (e instanceof HypervisorVersionChangedException) {
handleDisconnectWithoutInvestigation(attache, Event.ShutdownRequested);
handleDisconnectWithoutInvestigation(attache, Event.ShutdownRequested, true);
throw new CloudRuntimeException("Unable to connect " + attache.getId(), e);
} else {
s_logger.error("Monitor " + monitor.second().getClass().getSimpleName() + " says there is an error in the connect process for " + hostId + " due to " + e.getMessage(), e);
handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected);
handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true);
throw new CloudRuntimeException("Unable to connect " + attache.getId(), e);
}
}
@ -628,13 +634,13 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
}
Long dcId = host.getDataCenterId();
ReadyCommand ready = new ReadyCommand(dcId);
ReadyCommand ready = new ReadyCommand(dcId, host.getId());
Answer answer = easySend(hostId, ready);
if (answer == null || !answer.getResult()) {
// this is a tricky part for secondary storage
// mark it as disconnected and wait for the secondary storage VM to come up
// return the attache instead of null, even if it is disconnected
handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected);
handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true);
}
agentStatusTransitTo(host, Event.Ready, _nodeId);
@ -828,6 +834,8 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
}
}
}
_connectExecutor.shutdownNow();
return true;
}
@ -836,7 +844,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
return _name;
}
protected boolean handleDisconnectWithoutInvestigation(AgentAttache attache, Status.Event event) {
protected boolean handleDisconnectWithoutInvestigation(AgentAttache attache, Status.Event event, boolean transitState) {
long hostId = attache.getId();
s_logger.info("Host " + hostId + " is disconnecting with event " + event);
@ -871,8 +879,11 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
s_logger.debug("Deregistering link for " + hostId + " with state " + nextStatus);
}
//remove the attache
removeAgent(attache, nextStatus);
if (host != null) {
//update the DB
if (host != null && transitState) {
disconnectAgent(host, event, _nodeId);
}
@ -942,7 +953,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
}
}
handleDisconnectWithoutInvestigation(attache, event);
handleDisconnectWithoutInvestigation(attache, event, true);
host = _hostDao.findById(host.getId());
if (host.getStatus() == Status.Alert || host.getStatus() == Status.Down) {
_haMgr.scheduleRestartForVmsOnHost(host, true);
@ -968,7 +979,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
if (_investigate == true) {
handleDisconnectWithInvestigation(_attache, _event);
} else {
handleDisconnectWithoutInvestigation(_attache, _event);
handleDisconnectWithoutInvestigation(_attache, _event, true);
}
} catch (final Exception e) {
s_logger.error("Exception caught while handling disconnect: ", e);
@ -1060,7 +1071,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
AgentAttache attache = null;
attache = findAttache(hostId);
if (attache != null) {
handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected);
handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true);
}
return true;
} else if (event == Event.ShutdownRequested) {
@ -1085,91 +1096,37 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
return attache;
}
//TODO: handle mycloud specific
private AgentAttache handleConnectedAgent(final Link link, final StartupCommand[] startup, Request request) {
AgentAttache attache = null;
StartupAnswer[] answers = new StartupAnswer[startup.length];
try {
HostVO host = _resourceMgr.createHostVOForConnectedAgent(startup);
ReadyCommand ready = null;
try {
HostVO host = _resourceMgr.createHostVOForConnectedAgent(startup);
if (host != null) {
ready = new ReadyCommand(host.getDataCenterId(), host.getId());
attache = createAttacheForConnect(host, link);
attache = notifyMonitorsOfConnection(attache, startup, false);
}
Command cmd;
for (int i = 0; i < startup.length; i++) {
cmd = startup[i];
if ((cmd instanceof StartupRoutingCommand) || (cmd instanceof StartupProxyCommand) || (cmd instanceof StartupSecondaryStorageCommand) || (cmd instanceof StartupStorageCommand)) {
answers[i] = new StartupAnswer(startup[i], attache.getId(), getPingInterval());
break;
}
}
}catch (ConnectionException e) {
Command cmd;
for (int i = 0; i < startup.length; i++) {
cmd = startup[i];
if ((cmd instanceof StartupRoutingCommand) || (cmd instanceof StartupProxyCommand) || (cmd instanceof StartupSecondaryStorageCommand) || (cmd instanceof StartupStorageCommand)) {
answers[i] = new StartupAnswer(startup[i], e.toString());
break;
}
}
} catch (IllegalArgumentException e) {
Command cmd;
for (int i = 0; i < startup.length; i++) {
cmd = startup[i];
if ((cmd instanceof StartupRoutingCommand) || (cmd instanceof StartupProxyCommand) || (cmd instanceof StartupSecondaryStorageCommand) || (cmd instanceof StartupStorageCommand)) {
answers[i] = new StartupAnswer(startup[i], e.toString());
break;
}
}
} catch (CloudRuntimeException e) {
Command cmd;
for (int i = 0; i < startup.length; i++) {
cmd = startup[i];
if ((cmd instanceof StartupRoutingCommand) || (cmd instanceof StartupProxyCommand) || (cmd instanceof StartupSecondaryStorageCommand) || (cmd instanceof StartupStorageCommand)) {
answers[i] = new StartupAnswer(startup[i], e.toString());
break;
}
}
}
Response response = null;
if (attache != null) {
response = new Response(request, answers[0], _nodeId, attache.getId());
} else {
response = new Response(request, answers[0], _nodeId, -1);
} catch (Exception e) {
s_logger.debug("Failed to handle host connection: " + e.toString());
ready = new ReadyCommand(null);
ready.setDetails(e.toString());
} finally {
if (ready == null) {
ready = new ReadyCommand(null);
}
}
try {
link.send(response.toBytes());
} catch (ClosedChannelException e) {
s_logger.debug("Failed to send startupanswer: " + e.toString());
return null;
}
if (attache == null) {
return null;
}
try {
attache = notifyMonitorsOfConnection(attache, startup, false);
return attache;
} catch (ConnectionException e) {
ReadyCommand ready = new ReadyCommand(null);
ready.setDetails(e.toString());
try {
if (attache == null) {
final Request readyRequest = new Request(-1, -1, ready, false);
link.send(readyRequest.getBytes());
} else {
easySend(attache.getId(), ready);
} catch (Exception e1) {
s_logger.debug("Failed to send readycommand, due to " + e.toString());
}
return null;
} catch (CloudRuntimeException e) {
ReadyCommand ready = new ReadyCommand(null);
ready.setDetails(e.toString());
try {
easySend(attache.getId(), ready);
} catch (Exception e1) {
s_logger.debug("Failed to send readycommand, due to " + e.toString());
}
return null;
} catch (Exception e) {
s_logger.debug("Failed to send ready command:" + e.toString());
}
return attache;
}
protected class SimulateStartTask implements Runnable {
@ -1203,6 +1160,53 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
}
}
}
protected class HandleAgentConnectTask implements Runnable {
Link _link;
Command[] _cmds;
Request _request;
HandleAgentConnectTask(Link link, final Command[] cmds, final Request request) {
_link = link;
_cmds = cmds;
_request = request;
}
@Override
public void run() {
_request.logD("Processing the first command ");
StartupCommand[] startups = new StartupCommand[_cmds.length];
for (int i = 0; i < _cmds.length; i++) {
startups[i] = (StartupCommand) _cmds[i];
}
AgentAttache attache = handleConnectedAgent(_link, startups, _request);
if (attache == null) {
s_logger.warn("Unable to create attache for agent: " + _request);
}
}
}
protected void connectAgent(Link link, final Command[] cmds, final Request request) {
//send the StartupAnswer to the agent right away, so the agent can move on instead of waiting an undetermined time for the answer while the rest of the connect logic runs in another thread pool.
StartupAnswer[] answers = new StartupAnswer[cmds.length];
Command cmd;
for (int i = 0; i < cmds.length; i++) {
cmd = cmds[i];
if ((cmd instanceof StartupRoutingCommand) || (cmd instanceof StartupProxyCommand) || (cmd instanceof StartupSecondaryStorageCommand) || (cmd instanceof StartupStorageCommand)) {
answers[i] = new StartupAnswer((StartupCommand)cmds[i], 0, getPingInterval());
break;
}
}
Response response = null;
response = new Response(request, answers[0], _nodeId, -1);
try {
link.send(response.toBytes());
} catch (ClosedChannelException e) {
s_logger.debug("Failed to send startupanswer: " + e.toString());
}
_connectExecutor.execute(new HandleAgentConnectTask(link, cmds, request));
}
public class AgentHandler extends Task {
public AgentHandler(Task.Type type, Link link, byte[] data) {
@ -1215,21 +1219,13 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
Command cmd = cmds[0];
boolean logD = true;
Response response = null;
if (attache == null) {
request.logD("Processing the first command ");
if (!(cmd instanceof StartupCommand)) {
s_logger.warn("Throwing away a request because it came through as the first command on a connect: " + request);
return;
}
StartupCommand[] startups = new StartupCommand[cmds.length];
for (int i = 0; i < cmds.length; i++) {
startups[i] = (StartupCommand) cmds[i];
}
attache = handleConnectedAgent(link, startups, request);
if (attache == null) {
s_logger.warn("Unable to create attache for agent: " + request);
} else {
//submit the task for execution
request.logD("Scheduling the first command ");
connectAgent(link, cmds, request);
}
return;
}
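The effect of the change above is that connectAgent() answers the Startup request on the I/O thread and defers the heavier connect handling (attache creation, monitor notification) to the connect pool, so the agent is never left waiting an unbounded time for its StartupAnswer. A generic sketch of that acknowledge-then-process hand-off, using simplified stand-in types rather than the real CloudStack classes:

import java.nio.charset.StandardCharsets;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class AckThenProcessSketch {
    interface Link { void send(byte[] data); }   // stand-in for the NIO link

    private final ExecutorService connectPool = Executors.newCachedThreadPool();

    void onFirstRequest(Link link, byte[] request) {
        // 1. Acknowledge immediately on the I/O thread so the peer can move on.
        link.send("STARTUP-ACK".getBytes(StandardCharsets.UTF_8));
        // 2. Do the expensive part (DB writes, monitor callbacks) off the I/O thread.
        connectPool.execute(() -> handleConnect(link, request));
    }

    private void handleConnect(Link link, byte[] request) {
        // placeholder for the createAttache/notifyMonitorsOfConnection style work
        System.out.println("processing a connect request of " + request.length + " bytes");
    }
}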
@ -1295,17 +1291,23 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
if (cmd instanceof PingRoutingCommand) {
boolean gatewayAccessible = ((PingRoutingCommand) cmd).isGatewayAccessible();
HostVO host = _hostDao.findById(Long.valueOf(cmdHostId));
if (!gatewayAccessible) {
// alert that host lost connection to
// gateway (cannot ping the default route)
DataCenterVO dcVO = _dcDao.findById(host.getDataCenterId());
HostPodVO podVO = _podDao.findById(host.getPodId());
String hostDesc = "name: " + host.getName() + " (id:" + host.getId() + "), availability zone: " + dcVO.getName() + ", pod: " + podVO.getName();
if (host != null) {
if (!gatewayAccessible) {
// alert that host lost connection to
// gateway (cannot ping the default route)
DataCenterVO dcVO = _dcDao.findById(host.getDataCenterId());
HostPodVO podVO = _podDao.findById(host.getPodId());
String hostDesc = "name: " + host.getName() + " (id:" + host.getId() + "), availability zone: " + dcVO.getName() + ", pod: " + podVO.getName();
_alertMgr.sendAlert(AlertManager.ALERT_TYPE_ROUTING, host.getDataCenterId(), host.getPodId(), "Host lost connection to gateway, " + hostDesc, "Host [" + hostDesc
+ "] lost connection to gateway (default route) and is possibly having network connection issues.");
_alertMgr.sendAlert(AlertManager.ALERT_TYPE_ROUTING, host.getDataCenterId(), host.getPodId(), "Host lost connection to gateway, " + hostDesc, "Host [" + hostDesc
+ "] lost connection to gateway (default route) and is possibly having network connection issues.");
} else {
_alertMgr.clearAlert(AlertManager.ALERT_TYPE_ROUTING, host.getDataCenterId(), host.getPodId());
}
} else {
_alertMgr.clearAlert(AlertManager.ALERT_TYPE_ROUTING, host.getDataCenterId(), host.getPodId());
s_logger.debug("Not processing " + PingRoutingCommand.class.getSimpleName() +
" for agent id=" + cmdHostId + "; can't find the host in the DB");
}
}
answer = new PingAnswer((PingCommand) cmd);
@ -1328,7 +1330,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
answers[i] = answer;
}
response = new Response(request, answers, _nodeId, attache.getId());
Response response = new Response(request, answers, _nodeId, attache.getId());
if (s_logger.isDebugEnabled()) {
if (logD) {
s_logger.debug("SeqA " + attache.getId() + "-" + response.getSequence() + ": Sending " + response);


@ -270,7 +270,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
}
@Override
protected boolean handleDisconnectWithoutInvestigation(AgentAttache attache, Status.Event event) {
protected boolean handleDisconnectWithoutInvestigation(AgentAttache attache, Status.Event event, boolean transitState) {
return handleDisconnect(attache, event, false, true);
}
@ -282,7 +282,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
protected boolean handleDisconnect(AgentAttache agent, Status.Event event, boolean investigate, boolean broadcast) {
boolean res;
if (!investigate) {
res = super.handleDisconnectWithoutInvestigation(agent, event);
res = super.handleDisconnectWithoutInvestigation(agent, event, true);
} else {
res = super.handleDisconnectWithInvestigation(agent, event);
}
@ -305,7 +305,27 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
}
AgentAttache attache = findAttache(hostId);
if (attache != null) {
handleDisconnect(attache, Event.AgentDisconnected, false, false);
//don't process disconnect if the host is being rebalanced
if (_clusterMgr.isAgentRebalanceEnabled()) {
HostTransferMapVO transferVO = _hostTransferDao.findById(hostId);
if (transferVO != null) {
if (transferVO.getFutureOwner() == _nodeId && transferVO.getState() == HostTransferState.TransferStarted) {
s_logger.debug("Not processing " + Event.AgentDisconnected + " event for the host id="
+ hostId +" as the host is being connected to " + _nodeId);
return true;
}
}
}
//don't process disconnect if the disconnect came for the host via delayed cluster notification,
//but the host has already reconnected to the current management server
if (!attache.forForward()) {
s_logger.debug("Not processing " + Event.AgentDisconnected + " event for the host id="
+ hostId +" as the host is directly connected to the current management server " + _nodeId);
return true;
}
return super.handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, false);
}
return true;
@ -571,6 +591,12 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
logD(data, "Cancel request received");
}
agent.cancel(cancel.getSequence());
final Long current = agent._currentSequence;
//if the cancelled request is the current request, we always have to trigger sending the next request in sequence,
//otherwise the agent queue will be blocked
if (req.executeInSequence() && (current != null && current == Request.getSequence(data))) {
agent.sendNext(Request.getSequence(data));
}
return;
}
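The added sendNext() call exists because commands to a single agent can be executed in sequence: if the request being cancelled is the one currently in flight, no answer will ever arrive to advance the queue, so the cancel path has to advance it itself. A toy model of that rule (not the real AgentAttache, which also tracks listeners and recurring commands):

import java.util.ArrayDeque;
import java.util.Deque;

public class SequencedSenderSketch {
    private final Deque<Long> pending = new ArrayDeque<>();
    private Long currentSequence;   // sequence currently on the wire, if any

    synchronized void submit(long seq) {
        if (currentSequence == null) {
            currentSequence = seq;  // nothing in flight: send immediately
            System.out.println("send " + seq);
        } else {
            pending.addLast(seq);   // queue behind the in-flight request
        }
    }

    synchronized void cancel(long seq) {
        pending.remove(seq);
        // If the cancelled request is the one in flight, advance the queue
        // ourselves - no answer will ever arrive to do it for us.
        if (currentSequence != null && currentSequence == seq) {
            sendNext();
        }
    }

    private void sendNext() {
        currentSequence = pending.pollFirst();
        if (currentSequence != null) {
            System.out.println("send " + currentSequence);
        }
    }

    public static void main(String[] args) {
        SequencedSenderSketch sender = new SequencedSenderSketch();
        sender.submit(1);   // sent immediately
        sender.submit(2);   // queued behind 1
        sender.cancel(1);   // 1 was in flight, so 2 is sent now
    }
}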
@ -853,7 +879,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
continue;
}
if (transferMap.getInitialOwner() != _nodeId || attache.forForward()) {
if (transferMap.getInitialOwner() != _nodeId || attache == null || attache.forForward()) {
s_logger.debug("Management server " + _nodeId + " doesn't own host id=" + hostId + " any more, skipping rebalance for the host");
iterator.remove();
_hostTransferDao.completeAgentTransfer(hostId);
@ -936,9 +962,24 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
HostVO host = _hostDao.findById(hostId);
try {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Loading directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process");
s_logger.debug("Disconnecting host " + host.getId() + "(" + host.getName() + " as a part of rebalance process without notification");
}
result = loadDirectlyConnectedHost(host, true);
AgentAttache attache = findAttache(hostId);
if (attache != null) {
result = handleDisconnect(attache, Event.AgentDisconnected, false, false);
}
if (result) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Loading directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process");
}
result = loadDirectlyConnectedHost(host, true);
} else {
s_logger.warn("Failed to disconnect " + host.getId() + "(" + host.getName() +
" as a part of rebalance process without notification");
}
} catch (Exception ex) {
s_logger.warn("Failed to load directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process due to:", ex);
result = false;
@ -1002,7 +1043,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
try {
s_logger.debug("Management server " + _nodeId + " failed to rebalance agent " + hostId);
_hostTransferDao.completeAgentTransfer(hostId);
handleDisconnectWithoutInvestigation(findAttache(hostId), Event.RebalanceFailed);
handleDisconnectWithoutInvestigation(findAttache(hostId), Event.RebalanceFailed, true);
} catch (Exception ex) {
s_logger.warn("Failed to reconnect host id=" + hostId + " as a part of failed rebalance task cleanup");
}
@ -1019,7 +1060,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
synchronized (_agents) {
ClusteredDirectAgentAttache attache = (ClusteredDirectAgentAttache)_agents.get(hostId);
if (attache != null && attache.getQueueSize() == 0 && attache.getNonRecurringListenersSize() == 0) {
handleDisconnectWithoutInvestigation(attache, Event.StartAgentRebalance);
handleDisconnectWithoutInvestigation(attache, Event.StartAgentRebalance, true);
ClusteredAgentAttache forwardAttache = (ClusteredAgentAttache)createAttache(hostId);
if (forwardAttache == null) {
s_logger.warn("Unable to create a forward attache for the host " + hostId + " as a part of rebalance process");


@ -624,35 +624,35 @@ public class AlertManagerImpl implements AlertManager {
//Cluster Level
case CapacityVO.CAPACITY_TYPE_MEMORY:
msgSubject = "System Alert: Low Available Memory in cluster " +cluster.getName()+ " pod " +pod.getName()+ " of availablity zone " + dc.getName();
msgSubject = "System Alert: Low Available Memory in cluster " +cluster.getName()+ " pod " +pod.getName()+ " of availability zone " + dc.getName();
totalStr = formatBytesToMegabytes(totalCapacity);
usedStr = formatBytesToMegabytes(usedCapacity);
msgContent = "System memory is low, total: " + totalStr + " MB, used: " + usedStr + " MB (" + pctStr + "%)";
alertType = ALERT_TYPE_MEMORY;
break;
case CapacityVO.CAPACITY_TYPE_CPU:
msgSubject = "System Alert: Low Unallocated CPU in cluster " +cluster.getName()+ " pod " +pod.getName()+ " of availablity zone " + dc.getName();
msgSubject = "System Alert: Low Unallocated CPU in cluster " +cluster.getName()+ " pod " +pod.getName()+ " of availability zone " + dc.getName();
totalStr = _dfWhole.format(totalCapacity);
usedStr = _dfWhole.format(usedCapacity);
msgContent = "Unallocated CPU is low, total: " + totalStr + " Mhz, used: " + usedStr + " Mhz (" + pctStr + "%)";
alertType = ALERT_TYPE_CPU;
break;
case CapacityVO.CAPACITY_TYPE_STORAGE:
msgSubject = "System Alert: Low Available Storage in cluster " +cluster.getName()+ " pod " +pod.getName()+ " of availablity zone " + dc.getName();
msgSubject = "System Alert: Low Available Storage in cluster " +cluster.getName()+ " pod " +pod.getName()+ " of availability zone " + dc.getName();
totalStr = formatBytesToMegabytes(totalCapacity);
usedStr = formatBytesToMegabytes(usedCapacity);
msgContent = "Available storage space is low, total: " + totalStr + " MB, used: " + usedStr + " MB (" + pctStr + "%)";
alertType = ALERT_TYPE_STORAGE;
break;
case CapacityVO.CAPACITY_TYPE_STORAGE_ALLOCATED:
msgSubject = "System Alert: Remaining unallocated Storage is low in cluster " +cluster.getName()+ " pod " +pod.getName()+ " of availablity zone " + dc.getName();
msgSubject = "System Alert: Remaining unallocated Storage is low in cluster " +cluster.getName()+ " pod " +pod.getName()+ " of availability zone " + dc.getName();
totalStr = formatBytesToMegabytes(totalCapacity);
usedStr = formatBytesToMegabytes(usedCapacity);
msgContent = "Unallocated storage space is low, total: " + totalStr + " MB, allocated: " + usedStr + " MB (" + pctStr + "%)";
alertType = ALERT_TYPE_STORAGE_ALLOCATED;
break;
case CapacityVO.CAPACITY_TYPE_LOCAL_STORAGE:
msgSubject = "System Alert: Remaining unallocated Local Storage is low in cluster " +cluster.getName()+ " pod " +pod.getName()+ " of availablity zone " + dc.getName();
msgSubject = "System Alert: Remaining unallocated Local Storage is low in cluster " +cluster.getName()+ " pod " +pod.getName()+ " of availability zone " + dc.getName();
totalStr = formatBytesToMegabytes(totalCapacity);
usedStr = formatBytesToMegabytes(usedCapacity);
msgContent = "Unallocated storage space is low, total: " + totalStr + " MB, allocated: " + usedStr + " MB (" + pctStr + "%)";
@ -661,7 +661,7 @@ public class AlertManagerImpl implements AlertManager {
//Pod Level
case CapacityVO.CAPACITY_TYPE_PRIVATE_IP:
msgSubject = "System Alert: Number of unallocated private IPs is low in pod " +pod.getName()+ " of availablity zone " + dc.getName();
msgSubject = "System Alert: Number of unallocated private IPs is low in pod " +pod.getName()+ " of availability zone " + dc.getName();
totalStr = Double.toString(totalCapacity);
usedStr = Double.toString(usedCapacity);
msgContent = "Number of unallocated private IPs is low, total: " + totalStr + ", allocated: " + usedStr + " (" + pctStr + "%)";
@ -670,28 +670,28 @@ public class AlertManagerImpl implements AlertManager {
//Zone Level
case CapacityVO.CAPACITY_TYPE_SECONDARY_STORAGE:
msgSubject = "System Alert: Low Available Secondary Storage in availablity zone " + dc.getName();
msgSubject = "System Alert: Low Available Secondary Storage in availability zone " + dc.getName();
totalStr = formatBytesToMegabytes(totalCapacity);
usedStr = formatBytesToMegabytes(usedCapacity);
msgContent = "Available secondary storage space is low, total: " + totalStr + " MB, used: " + usedStr + " MB (" + pctStr + "%)";
alertType = ALERT_TYPE_SECONDARY_STORAGE;
break;
case CapacityVO.CAPACITY_TYPE_VIRTUAL_NETWORK_PUBLIC_IP:
msgSubject = "System Alert: Number of unallocated virtual network public IPs is low in availablity zone " + dc.getName();
msgSubject = "System Alert: Number of unallocated virtual network public IPs is low in availability zone " + dc.getName();
totalStr = Double.toString(totalCapacity);
usedStr = Double.toString(usedCapacity);
msgContent = "Number of unallocated public IPs is low, total: " + totalStr + ", allocated: " + usedStr + " (" + pctStr + "%)";
alertType = ALERT_TYPE_VIRTUAL_NETWORK_PUBLIC_IP;
break;
case CapacityVO.CAPACITY_TYPE_DIRECT_ATTACHED_PUBLIC_IP:
msgSubject = "System Alert: Number of unallocated direct attached public IPs is low in availablity zone " + dc.getName();
msgSubject = "System Alert: Number of unallocated shared network IPs is low in availability zone " + dc.getName();
totalStr = Double.toString(totalCapacity);
usedStr = Double.toString(usedCapacity);
msgContent = "Number of unallocated direct attached public IPs is low, total: " + totalStr + ", allocated: " + usedStr + " (" + pctStr + "%)";
msgContent = "Number of unallocated shared network IPs is low, total: " + totalStr + ", allocated: " + usedStr + " (" + pctStr + "%)";
alertType = ALERT_TYPE_DIRECT_ATTACHED_PUBLIC_IP;
break;
case CapacityVO.CAPACITY_TYPE_VLAN:
msgSubject = "System Alert: Number of unallocated VLANs is low in availablity zone " + dc.getName();
msgSubject = "System Alert: Number of unallocated VLANs is low in availability zone " + dc.getName();
totalStr = Double.toString(totalCapacity);
usedStr = Double.toString(usedCapacity);
msgContent = "Number of unallocated VLANs is low, total: " + totalStr + ", allocated: " + usedStr + " (" + pctStr + "%)";


@ -33,8 +33,8 @@ import com.cloud.api.ApiDBUtils;
import com.cloud.api.ApiResponseGsonHelper;
import com.cloud.api.ApiServer;
import com.cloud.api.BaseCmd;
import com.cloud.utils.IdentityProxy;
import com.cloud.api.ResponseObject;
import com.cloud.utils.IdentityProxy;
import com.cloud.utils.encoding.URLEncoder;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.uuididentity.dao.IdentityDao;
@ -70,7 +70,7 @@ public class ApiResponseSerializer {
StringBuilder sb = new StringBuilder();
sb.append("{ \"" + result.getResponseName() + "\" : ");
sb.append("{ \"").append(result.getResponseName()).append("\" : ");
if (result instanceof ListResponse) {
List<? extends ResponseObject> responses = ((ListResponse) result).getResponses();
if ((responses != null) && !responses.isEmpty()) {
@ -80,19 +80,20 @@ public class ApiResponseSerializer {
jsonStr = unescape(jsonStr);
if (count != null && count != 0) {
sb.append("{ \"" + ApiConstants.COUNT + "\":" + count + " ,\"" + responses.get(0).getObjectName() + "\" : [ " + jsonStr);
sb.append("{ \"").append(ApiConstants.COUNT).append("\":").append(count).append(" ,\"").
append(responses.get(0).getObjectName()).append("\" : [ ").append(jsonStr);
}
for (int i = 1; i < ((ListResponse) result).getResponses().size(); i++) {
jsonStr = gson.toJson(responses.get(i));
jsonStr = unescape(jsonStr);
sb.append(", " + jsonStr);
sb.append(", ").append(jsonStr);
}
sb.append(" ] }");
} else {
sb.append("{ }");
}
} else if (result instanceof SuccessResponse) {
sb.append("{ \"success\" : \"" + ((SuccessResponse) result).getSuccess() + "\"} ");
sb.append("{ \"success\" : \"").append(((SuccessResponse) result).getSuccess()).append("\"} ");
} else if (result instanceof ExceptionResponse) {
String jsonErrorText = gson.toJson((ExceptionResponse) result);
jsonErrorText = unescape(jsonErrorText);
@ -104,7 +105,7 @@ public class ApiResponseSerializer {
if (result instanceof AsyncJobResponse || result instanceof CreateCmdResponse) {
sb.append(jsonStr);
} else {
sb.append(" { \"" + result.getObjectName() + "\" : " + jsonStr + " } ");
sb.append(" { \"").append(result.getObjectName()).append("\" : ").append(jsonStr).append(" } ");
}
} else {
sb.append("{ }");
@ -119,13 +120,14 @@ public class ApiResponseSerializer {
private static String toXMLSerializedString(ResponseObject result) {
StringBuilder sb = new StringBuilder();
sb.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
sb.append("<" + result.getResponseName() + " cloud-stack-version=\"" + ApiDBUtils.getVersion() + "\">");
sb.append("<").append(result.getResponseName()).append(" cloud-stack-version=\"").append(ApiDBUtils.getVersion()).append("\">");
if (result instanceof ListResponse) {
Integer count = ((ListResponse) result).getCount();
if (count != null && count != 0) {
sb.append("<" + ApiConstants.COUNT + ">" + ((ListResponse) result).getCount() + "</" + ApiConstants.COUNT + ">");
sb.append("<").append(ApiConstants.COUNT).append(">").append(((ListResponse) result).getCount()).
append("</").append(ApiConstants.COUNT).append(">");
}
List<? extends ResponseObject> responses = ((ListResponse) result).getResponses();
if ((responses != null) && !responses.isEmpty()) {
@ -141,17 +143,17 @@ public class ApiResponseSerializer {
}
}
sb.append("</" + result.getResponseName() + ">");
sb.append("</").append(result.getResponseName()).append(">");
return sb.toString();
}
private static void serializeResponseObjXML(StringBuilder sb, ResponseObject obj) {
if (!(obj instanceof SuccessResponse) && !(obj instanceof ExceptionResponse)) {
sb.append("<" + obj.getObjectName() + ">");
sb.append("<").append(obj.getObjectName()).append(">");
}
serializeResponseObjFieldsXML(sb, obj);
if (!(obj instanceof SuccessResponse) && !(obj instanceof ExceptionResponse)) {
sb.append("</" + obj.getObjectName() + ">");
sb.append("</").append(obj.getObjectName()).append(">");
}
}
@ -221,24 +223,24 @@ public class ApiResponseSerializer {
if(id != null && !id.isEmpty()) {
// If this is the first IdentityProxy field encountered, put in a uuidList tag.
if (!usedUuidList) {
sb.append("<" + serializedName.value() + ">");
sb.append("<").append(serializedName.value()).append(">");
usedUuidList = true;
}
sb.append("<" + "uuid" + ">" + id + "</" + "uuid" + ">");
sb.append("<uuid>").append(id).append("</uuid>");
}
// Append the new idFieldName property also.
String idFieldName = idProxy.getidFieldName();
if (idFieldName != null) {
sb.append("<" + "uuidProperty" + ">" + idFieldName + "</" + "uuidProperty" + ">");
sb.append("<uuidProperty>").append(idFieldName).append("</uuidProperty>");
}
}
}
if (usedUuidList) {
// close the uuidList.
sb.append("</" + serializedName.value() + ">");
sb.append("</").append(serializedName.value()).append(">");
}
} else if (fieldValue instanceof Date) {
sb.append("<" + serializedName.value() + ">" + BaseCmd.getDateString((Date) fieldValue) + "</" + serializedName.value() + ">");
sb.append("<").append(">").append(BaseCmd.getDateString((Date) fieldValue)).append("</").append(serializedName.value()).append(">");
} else if (fieldValue instanceof IdentityProxy) {
IdentityProxy idProxy = (IdentityProxy)fieldValue;
String id = (idProxy.getValue() != null ? String.valueOf(idProxy.getValue()) : "");
@ -251,14 +253,14 @@ public class ApiResponseSerializer {
}
}
if(id != null && !id.isEmpty())
sb.append("<" + serializedName.value() + ">" + id + "</" + serializedName.value() + ">");
sb.append("<").append(serializedName.value()).append(">").append(id).append("</").append(serializedName.value()).append(">");
} else {
String resultString = escapeSpecialXmlChars(fieldValue.toString());
if (!(obj instanceof ExceptionResponse)) {
resultString = encodeParam(resultString);
}
sb.append("<" + serializedName.value() + ">" + resultString + "</" + serializedName.value() + ">");
sb.append("<").append(serializedName.value()).append(">").append(resultString).append("</").append(serializedName.value()).append(">");
}
}
}
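The serializer changes above are mechanical: every sb.append of a concatenated string becomes a chain of append calls, so each value lands directly in the existing builder instead of being concatenated into a throwaway intermediate String first. The difference in isolation (the tag name and value below are illustrative):

public class AppendChainSketch {
    public static void main(String[] args) {
        String tag = "count";
        int value = 42;

        // Before: "<" + tag + ">" + ... is concatenated into a temporary String,
        // which is then copied into the builder.
        StringBuilder before = new StringBuilder();
        before.append("<" + tag + ">" + value + "</" + tag + ">");

        // After: each piece is appended directly, with no intermediate String.
        StringBuilder after = new StringBuilder();
        after.append("<").append(tag).append(">").append(value)
             .append("</").append(tag).append(">");

        System.out.println(before.toString().equals(after.toString())); // true - same output
    }
}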


@ -270,7 +270,7 @@ public class AsyncJobManagerImpl implements AsyncJobManager, ClusterManagerListe
Random random = new Random();
for(int i = 0; i < 5; i++) {
queue = _queueMgr.queue(syncObjType, syncObjId, "AsyncJob", job.getId(), queueSizeLimit);
queue = _queueMgr.queue(syncObjType, syncObjId, SyncQueueItem.AsyncJobContentType, job.getId(), queueSizeLimit);
if(queue != null) {
break;
}
@ -598,60 +598,73 @@ public class AsyncJobManagerImpl implements AsyncJobManager, ClusterManagerListe
return new Runnable() {
@Override
public void run() {
GlobalLock scanLock = GlobalLock.getInternLock("AsyncJobManagerGC");
try {
if(scanLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION)) {
try {
reallyRun();
} finally {
scanLock.unlock();
}
}
} finally {
scanLock.releaseRef();
}
}
private void reallyRun() {
try {
s_logger.trace("Begin cleanup expired async-jobs");
Date cutTime = new Date(DateUtil.currentGMTTime().getTime() - _jobExpireSeconds*1000);
// limit to 100 jobs per turn, this gives cleanup throughput as 600 jobs per minute
// hopefully this will be fast enough to balance potential growth of job table
List<AsyncJobVO> l = _jobDao.getExpiredJobs(cutTime, 100);
if(l != null && l.size() > 0) {
for(AsyncJobVO job : l) {
_jobDao.expunge(job.getId());
}
}
// forcely cancel blocking queue items if they've been staying there for too long
List<SyncQueueItemVO> blockItems = _queueMgr.getBlockedQueueItems(_jobCancelThresholdSeconds*1000, false);
if(blockItems != null && blockItems.size() > 0) {
for(SyncQueueItemVO item : blockItems) {
if(item.getContentType().equalsIgnoreCase("AsyncJob")) {
completeAsyncJob(item.getContentId(), AsyncJobResult.STATUS_FAILED, 0, getResetResultResponse("Job is cancelled as it has been blocking others for too long"));
GlobalLock scanLock = GlobalLock.getInternLock("AsyncJobManagerGC");
try {
if(scanLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION)) {
try {
reallyRun();
} finally {
scanLock.unlock();
}
}
} finally {
scanLock.releaseRef();
}
}
public void reallyRun() {
try {
s_logger.trace("Begin cleanup expired async-jobs");
Date cutTime = new Date(DateUtil.currentGMTTime().getTime() - _jobExpireSeconds*1000);
// limit to 100 jobs per turn, this gives cleanup throughput as 600 jobs per minute
// hopefully this will be fast enough to balance potential growth of job table
List<AsyncJobVO> l = _jobDao.getExpiredJobs(cutTime, 100);
if(l != null && l.size() > 0) {
for(AsyncJobVO job : l) {
expungeAsyncJob(job);
}
}
// forcefully cancel blocking queue items if they've been staying there for too long
List<SyncQueueItemVO> blockItems = _queueMgr.getBlockedQueueItems(_jobCancelThresholdSeconds*1000, false);
if(blockItems != null && blockItems.size() > 0) {
for(SyncQueueItemVO item : blockItems) {
if(item.getContentType().equalsIgnoreCase(SyncQueueItem.AsyncJobContentType)) {
completeAsyncJob(item.getContentId(), AsyncJobResult.STATUS_FAILED, 0,
getResetResultResponse("Job is cancelled as it has been blocking others for too long"));
}
// purge the item and resume queue processing
_queueMgr.purgeItem(item.getId());
}
}
s_logger.trace("End cleanup expired async-jobs");
} catch(Throwable e) {
s_logger.error("Unexpected exception when trying to execute queue item, ", e);
} finally {
StackMaid.current().exitCleanup();
}
}
};
}
private long getMsid() {
if(_clusterMgr != null) {
// purge the item and resume queue processing
_queueMgr.purgeItem(item.getId());
}
}
s_logger.trace("End cleanup expired async-jobs");
} catch(Throwable e) {
s_logger.error("Unexpected exception when trying to execute queue item, ", e);
} finally {
StackMaid.current().exitCleanup();
}
}
};
}
@DB
protected void expungeAsyncJob(AsyncJobVO job) {
Transaction txn = Transaction.currentTxn();
txn.start();
_jobDao.expunge(job.getId());
//purge corresponding sync queue item
_queueMgr.purgeAsyncJobQueueItemId(job.getId());
txn.commit();
}
private long getMsid() {
if(_clusterMgr != null) {
return _clusterMgr.getManagementNodeId();
}
@ -666,7 +679,7 @@ public class AsyncJobManagerImpl implements AsyncJobManager, ClusterManagerListe
}
String contentType = item.getContentType();
if(contentType != null && contentType.equals("AsyncJob")) {
if(contentType != null && contentType.equalsIgnoreCase(SyncQueueItem.AsyncJobContentType)) {
Long jobId = item.getContentId();
if(jobId != null) {
s_logger.warn("Mark job as failed as its correspoding queue-item has been discarded. job id: " + jobId);


@ -30,4 +30,6 @@ public interface SyncQueueManager extends Manager {
public List<SyncQueueItemVO> getActiveQueueItems(Long msid, boolean exclusive);
public List<SyncQueueItemVO> getBlockedQueueItems(long thresholdMs, boolean exclusive);
void purgeAsyncJobQueueItemId(long asyncJobId);
}


@ -185,13 +185,16 @@ public class SyncQueueManagerImpl implements SyncQueueManager {
if(itemVO != null) {
SyncQueueVO queueVO = _syncQueueDao.lockRow(itemVO.getQueueId(), true);
_syncQueueItemDao.expunge(itemVO.getId());
queueVO.setLastUpdated(DateUtil.currentGMTTime());
//decrement the count
assert (queueVO.getQueueSize() > 0) : "Count reduce happens when it's already <= 0!";
queueVO.setQueueSize(queueVO.getQueueSize() - 1);
_syncQueueDao.update(queueVO.getId(), queueVO);
_syncQueueItemDao.expunge(itemVO.getId());
//if item is active, reset queue information
if (itemVO.getLastProcessMsid() != null) {
queueVO.setLastUpdated(DateUtil.currentGMTTime());
//decrement the count
assert (queueVO.getQueueSize() > 0) : "Count reduce happens when it's already <= 0!";
queueVO.setQueueSize(queueVO.getQueueSize() - 1);
_syncQueueDao.update(queueVO.getId(), queueVO);
}
}
txt.commit();
} catch(Exception e) {
@ -273,5 +276,13 @@ public class SyncQueueManagerImpl implements SyncQueueManager {
private boolean queueReadyToProcess(SyncQueueVO queueVO) {
return queueVO.getQueueSize() < queueVO.getQueueSizeLimit();
}
@Override
public void purgeAsyncJobQueueItemId(long asyncJobId) {
Long itemId = _syncQueueItemDao.getQueueItemIdByContentIdAndType(asyncJobId, SyncQueueItem.AsyncJobContentType);
if (itemId != null) {
purgeItem(itemId);
}
}
}
}


@ -26,4 +26,5 @@ public interface SyncQueueItemDao extends GenericDao<SyncQueueItemVO, Long> {
public List<SyncQueueItemVO> getNextQueueItems(int maxItems);
public List<SyncQueueItemVO> getActiveQueueItems(Long msid, boolean exclusive);
public List<SyncQueueItemVO> getBlockedQueueItems(long thresholdMs, boolean exclusive);
}
public Long getQueueItemIdByContentIdAndType(long contentId, String contentType);
}


@ -33,13 +33,25 @@ import com.cloud.async.SyncQueueItemVO;
import com.cloud.utils.DateUtil;
import com.cloud.utils.db.Filter;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.GenericSearchBuilder;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Op;
import com.cloud.utils.db.Transaction;
@Local(value = { SyncQueueItemDao.class })
public class SyncQueueItemDaoImpl extends GenericDaoBase<SyncQueueItemVO, Long> implements SyncQueueItemDao {
private static final Logger s_logger = Logger.getLogger(SyncQueueItemDaoImpl.class);
final GenericSearchBuilder<SyncQueueItemVO, Long> queueIdSearch;
protected SyncQueueItemDaoImpl() {
super();
queueIdSearch = createSearchBuilder(Long.class);
queueIdSearch.and("contentId", queueIdSearch.entity().getContentId(), Op.EQ);
queueIdSearch.and("contentType", queueIdSearch.entity().getContentType(), Op.EQ);
queueIdSearch.selectField(queueIdSearch.entity().getId());
queueIdSearch.done();
}
@Override
@ -132,4 +144,15 @@ public class SyncQueueItemDaoImpl extends GenericDaoBase<SyncQueueItemVO, Long>
return lockRows(sc, null, true);
return listBy(sc, null);
}
@Override
public Long getQueueItemIdByContentIdAndType(long contentId, String contentType) {
SearchCriteria<Long> sc = queueIdSearch.create();
sc.setParameters("contentId", contentId);
sc.setParameters("contentType", contentType);
List<Long> id = customSearch(sc, null);
return id.size() == 0 ? null : id.get(0);
}
}


@ -2105,7 +2105,8 @@ public class NetworkManagerImpl implements NetworkManager, NetworkService, Manag
List<NicVO> nics = _nicDao.listByVmId(vmProfile.getId());
// we have to implement default nics first - to ensure that default network elements start up first in multiple
// nics case)(need for setting DNS on Dhcp to domR's Ip4 address)
//nics case
// (need for setting DNS on Dhcp to domR's Ip4 address)
Collections.sort(nics, new Comparator<NicVO>() {
@Override
@ -2204,6 +2205,7 @@ public class NetworkManagerImpl implements NetworkManager, NetworkService, Manag
}
@Override
@DB
public void release(VirtualMachineProfile<? extends VMInstanceVO> vmProfile, boolean forced) throws
ConcurrentOperationException, ResourceUnavailableException {
List<NicVO> nics = _nicDao.listByVmId(vmProfile.getId());
@ -2211,19 +2213,32 @@ public class NetworkManagerImpl implements NetworkManager, NetworkService, Manag
releaseNic(vmProfile, nic);
}
}
@Override
@DB
public void releaseNic(VirtualMachineProfile<? extends VMInstanceVO> vmProfile, Nic nic)
throws ConcurrentOperationException, ResourceUnavailableException {
NicVO nicVO = _nicDao.findById(nic.getId());
releaseNic(vmProfile, nicVO);
}
protected void releaseNic(VirtualMachineProfile<? extends VMInstanceVO> vmProfile, NicVO nic)
@DB
protected void releaseNic(VirtualMachineProfile<? extends VMInstanceVO> vmProfile, NicVO nicVO)
throws ConcurrentOperationException, ResourceUnavailableException {
NetworkVO network = _networksDao.findById(nic.getNetworkId());
if (nic.getState() == Nic.State.Reserved || nic.getState() == Nic.State.Reserving) {
Nic.State originalState = nic.getState();
//lock the nic
Transaction txn = Transaction.currentTxn();
txn.start();
NicVO nic = _nicDao.lockRow(nicVO.getId(), true);
if (nic == null) {
throw new ConcurrentOperationException("Unable to acquire lock on nic " + nic);
}
Nic.State originalState = nic.getState();
NetworkVO network = _networksDao.findById(nicVO.getNetworkId());
if (originalState == Nic.State.Reserved || originalState == Nic.State.Reserving) {
if (nic.getReservationStrategy() == Nic.ReservationStrategy.Start) {
NetworkGuru guru = _networkGurus.get(network.getGuruName());
nic.setState(Nic.State.Releasing);
@ -2239,6 +2254,9 @@ public class NetworkManagerImpl implements NetworkManager, NetworkService, Manag
_nicDao.update(nic.getId(), nic);
}
}
//commit the transaction before proceeding to release the nic profile on the network elements
txn.commit();
// Perform release on network elements
for (NetworkElement element : _networkElements) {
if (s_logger.isDebugEnabled()) {
@ -2252,6 +2270,7 @@ public class NetworkManagerImpl implements NetworkManager, NetworkService, Manag
} else {
nic.setState(Nic.State.Allocated);
updateNic(nic, network.getId(), -1);
txn.commit();
}
}
}


@ -301,9 +301,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian
result = result && _itMgr.removeVmFromNetwork(router, network, null);
if (result) {
if (result) {
_routerDao.removeRouterFromGuestNetwork(router.getId(), network.getId());
}
_routerDao.removeRouterFromGuestNetwork(router.getId(), network.getId());
}
return result;
}


@ -32,6 +32,7 @@ import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@ -85,6 +86,7 @@ import com.cloud.offerings.NetworkOfferingServiceMapVO;
import com.cloud.offerings.NetworkOfferingVO;
import com.cloud.offerings.dao.NetworkOfferingDao;
import com.cloud.offerings.dao.NetworkOfferingServiceMapDao;
import com.cloud.server.auth.UserAuthenticator;
import com.cloud.service.ServiceOfferingVO;
import com.cloud.service.dao.ServiceOfferingDao;
import com.cloud.storage.DiskOfferingVO;
@ -96,6 +98,7 @@ import com.cloud.user.User;
import com.cloud.user.dao.AccountDao;
import com.cloud.utils.PasswordGenerator;
import com.cloud.utils.PropertiesUtil;
import com.cloud.utils.component.Adapters;
import com.cloud.utils.component.ComponentLocator;
import com.cloud.utils.crypt.DBEncryptionUtil;
import com.cloud.utils.db.DB;
@ -342,30 +345,13 @@ public class ConfigurationServerImpl implements ConfigurationServer {
} catch (SQLException ex) {
}
// insert admin user
// insert admin user, but leave the account disabled until we set a
// password with the user authenticator
long id = 2;
String username = "admin";
String firstname = "admin";
String lastname = "cloud";
String password = "password";
MessageDigest md5 = null;
try {
md5 = MessageDigest.getInstance("MD5");
} catch (NoSuchAlgorithmException e) {
return;
}
md5.reset();
BigInteger pwInt = new BigInteger(1, md5.digest(password.getBytes()));
String pwStr = pwInt.toString(16);
int padding = 32 - pwStr.length();
StringBuffer sb = new StringBuffer();
for (int i = 0; i < padding; i++) {
sb.append('0'); // make sure the MD5 password is 32 digits long
}
sb.append(pwStr);
// create an account for the admin user first
insertSql = "INSERT INTO `cloud`.`account` (id, account_name, type, domain_id) VALUES (" + id + ", '" + username + "', '1', '1')";
txn = Transaction.currentTxn();
@ -376,8 +362,8 @@ public class ConfigurationServerImpl implements ConfigurationServer {
}
// now insert the user
insertSql = "INSERT INTO `cloud`.`user` (id, username, password, account_id, firstname, lastname, created) " +
"VALUES (" + id + ",'" + username + "','" + sb.toString() + "', 2, '" + firstname + "','" + lastname + "',now())";
insertSql = "INSERT INTO `cloud`.`user` (id, username, account_id, firstname, lastname, created, state) " +
"VALUES (" + id + ",'" + username + "', 2, '" + firstname + "','" + lastname + "',now(), 'disabled')";
txn = Transaction.currentTxn();
try {
@ -584,6 +570,7 @@ public class ConfigurationServerImpl implements ConfigurationServer {
String username = System.getProperty("user.name");
Boolean devel = Boolean.valueOf(_configDao.getValue("developer"));
if (!username.equalsIgnoreCase("cloud") && !devel) {
s_logger.warn("Systemvm keypairs could not be set. Management server should be run as cloud user, or in development mode.");
return;
}
String already = _configDao.getValue("ssh.privatekey");


@ -95,4 +95,6 @@ public interface ManagementServer extends ManagementService {
Pair<List<StoragePoolVO>, Integer> searchForStoragePools(Criteria c);
String getHashKey();
public void enableAdminUser(String password);
}


@ -177,6 +177,7 @@ import com.cloud.projects.Project.ListProjectResourcesCriteria;
import com.cloud.projects.ProjectManager;
import com.cloud.resource.ResourceManager;
import com.cloud.server.ResourceTag.TaggedResourceType;
import com.cloud.server.auth.UserAuthenticator;
import com.cloud.service.ServiceOfferingVO;
import com.cloud.service.dao.ServiceOfferingDao;
import com.cloud.storage.DiskOfferingVO;
@ -215,7 +216,9 @@ import com.cloud.user.AccountVO;
import com.cloud.user.SSHKeyPair;
import com.cloud.user.SSHKeyPairVO;
import com.cloud.user.User;
import com.cloud.user.UserAccount;
import com.cloud.user.UserContext;
import com.cloud.user.UserVO;
import com.cloud.user.dao.AccountDao;
import com.cloud.user.dao.SSHKeyPairDao;
import com.cloud.user.dao.UserDao;
@ -338,6 +341,8 @@ public class ManagementServerImpl implements ManagementServer {
private final StatsCollector _statsCollector;
private final Map<String, Boolean> _availableIdsMap;
private Adapters<UserAuthenticator> _userAuthenticators;
private String _hashKey = null;
@ -417,6 +422,11 @@ public class ManagementServerImpl implements ManagementServer {
for (String id : availableIds) {
_availableIdsMap.put(id, true);
}
_userAuthenticators = locator.getAdapters(UserAuthenticator.class);
if (_userAuthenticators == null || !_userAuthenticators.isSet()) {
s_logger.error("Unable to find an user authenticator.");
}
}
protected Map<String, String> getConfigs() {
@ -3587,5 +3597,28 @@ public class ManagementServerImpl implements ManagementServer {
}
}
public void enableAdminUser(String password) {
String encodedPassword = null;
UserVO adminUser = _userDao.getUser(2);
if (adminUser.getState() == Account.State.disabled) {
// This means it's a new account, set the password using the authenticator
for (Enumeration<UserAuthenticator> en = _userAuthenticators.enumeration(); en.hasMoreElements();) {
UserAuthenticator authenticator = en.nextElement();
encodedPassword = authenticator.encode(password);
if (encodedPassword != null) {
break;
}
}
adminUser.setPassword(encodedPassword);
adminUser.setState(Account.State.enabled);
_userDao.persist(adminUser);
s_logger.info("Admin user enabled");
}
}
}


@ -34,4 +34,10 @@ public interface UserAuthenticator extends Adapter {
* @return true if the user has been successfully authenticated, false otherwise
*/
public boolean authenticate(String username, String password, Long domainId, Map<String, Object[]> requestParameters);
/**
* @param password
* @return the encoded password
*/
public String encode(String password);
}


@ -47,6 +47,7 @@ public class CloudStartupServlet extends HttpServlet implements ServletContextLi
c.persistDefaultValues();
s_locator = ComponentLocator.getLocator(ManagementServer.Name);
ManagementServer ms = (ManagementServer)ComponentLocator.getComponent(ManagementServer.Name);
ms.enableAdminUser("password");
ApiServer.initApiServer(ms.getApiConfig());
} catch (InvalidParameterValueException ipve) {
s_logger.error("Exception starting management server ", ipve);


@ -921,7 +921,18 @@ public class AccountManagerImpl implements AccountManager, AccountService, Manag
}
if (password != null) {
user.setPassword(password);
String encodedPassword = null;
for (Enumeration<UserAuthenticator> en = _userAuthenticators.enumeration(); en.hasMoreElements();) {
UserAuthenticator authenticator = en.nextElement();
encodedPassword = authenticator.encode(password);
if (encodedPassword != null) {
break;
}
}
if (encodedPassword == null) {
throw new CloudRuntimeException("Failed to encode password");
}
user.setPassword(encodedPassword);
}
if (email != null) {
user.setEmail(email);
@ -1670,7 +1681,20 @@ public class AccountManagerImpl implements AccountManager, AccountService, Manag
if (s_logger.isDebugEnabled()) {
s_logger.debug("Creating user: " + userName + ", accountId: " + accountId + " timezone:" + timezone);
}
UserVO user = _userDao.persist(new UserVO(accountId, userName, password, firstName, lastName, email, timezone));
String encodedPassword = null;
for (Enumeration<UserAuthenticator> en = _userAuthenticators.enumeration(); en.hasMoreElements();) {
UserAuthenticator authenticator = en.nextElement();
encodedPassword = authenticator.encode(password);
if (encodedPassword != null) {
break;
}
}
if (encodedPassword == null) {
throw new CloudRuntimeException("Failed to encode password");
}
UserVO user = _userDao.persist(new UserVO(accountId, userName, encodedPassword, firstName, lastName, email, timezone));
return user;
}
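Both enableAdminUser() and the account-manager changes above use the same adapter-chain idiom: walk the configured UserAuthenticators and keep the first non-null encoding, failing if none of them can encode the password. Stripped of the ComponentLocator/Adapters plumbing, the idiom reduces to the sketch below (the Encoder interface and the lambdas are simplified stand-ins, not CloudStack types):

import java.util.List;

public class FirstNonNullEncoderSketch {
    interface Encoder { String encode(String password); }   // stand-in for UserAuthenticator

    static String encodeWithFirstCapable(List<Encoder> encoders, String password) {
        for (Encoder e : encoders) {
            String encoded = e.encode(password);
            if (encoded != null) {
                return encoded;   // the first authenticator that can encode wins
            }
        }
        // mirrors the CloudRuntimeException thrown above when no encoder succeeds
        throw new IllegalStateException("Failed to encode password");
    }

    public static void main(String[] args) {
        List<Encoder> chain = List.of(
                pw -> null,                          // an authenticator that cannot encode
                pw -> "sha256salted:" + pw.length()  // a fake encoder that can
        );
        System.out.println(encodeWithFirstCapable(chain, "password"));
    }
}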


@ -5,9 +5,9 @@
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@ -28,35 +28,14 @@ from nose.plugins.attrib import attr
#Import System modules
import time
class Services:
"""Test secondary storage Services
"""
def __init__(self):
self.services = {
"storage": {
"url": "nfs://192.168.100.131/SecStorage"
# Format: File_System_Type/Location/Path
},
"hypervisors": {
0: {
"hypervisor": "XenServer",
"templatefilter": "self",
},
},
"sleep": 60,
"timeout": 5,
}
class TestSecStorageServices(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(TestSecStorageServices, cls).getClsTestClient().getApiClient()
cls.services = Services().services
cls._cleanup = []
return
@classmethod
def tearDownClass(cls):
try:
@ -65,16 +44,32 @@ class TestSecStorageServices(cloudstackTestCase):
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.cleanup = []
self.services = Services().services
# Get Zone and pod
self.domain = get_domain(self.apiclient, self.services)
self.zone = get_zone(self.apiclient, self.services)
self.pod = get_pod(self.apiclient, self.zone.id)
self.zones = []
self.pods = []
for zone in self.config.zones:
cmd = listZones.listZonesCmd()
cmd.name = zone.name
z = self.apiclient.listZones(cmd)
if isinstance(z, list) and len(z) > 0:
self.zones.append(z[0].id)
for pod in zone.pods:
podcmd = listPods.listPodsCmd()
podcmd.zoneid = z[0].id
p = self.apiclient.listPods(podcmd)
if isinstance(p, list) and len(p) > 0:
self.pods.append(p[0].id)
self.domains = []
dcmd = listDomains.listDomainsCmd()
domains = self.apiclient.listDomains(dcmd)
assert isinstance(domains, list) and len(domains) > 0
for domain in domains:
self.domains.append(domain.id)
return
def tearDown(self):
@ -85,62 +80,8 @@ class TestSecStorageServices(cloudstackTestCase):
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@unittest.skip("skipped - do not add secondary storage")
def test_01_add_sec_storage(self):
"""Test secondary storage
"""
# Validate the following:
# 1. secondary storage should be added to the zone.
# 2. Verify with listHosts and type secondarystorage
cmd = addSecondaryStorage.addSecondaryStorageCmd()
cmd.zoneid = self.zone.id
cmd.url = self.services["storage"]["url"]
sec_storage = self.apiclient.addSecondaryStorage(cmd)
self.debug("Added secondary storage to zone: %s" % self.zone.id)
# Cleanup at the end
self._cleanup.append(sec_storage)
self.assertEqual(
sec_storage.zoneid,
self.zone.id,
"Check zoneid where sec storage is added"
)
list_hosts_response = list_hosts(
self.apiclient,
type='SecondaryStorage',
id=sec_storage.id
)
self.assertEqual(
isinstance(list_hosts_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(list_hosts_response),
0,
"Check list Hosts response"
)
host_response = list_hosts_response[0]
#Check if host is Up and running
self.assertEqual(
host_response.id,
sec_storage.id,
"Check ID of secondary storage"
)
self.assertEqual(
sec_storage.type,
host_response.type,
"Check type of host from list hosts response"
)
return
@attr(tags = ["advanced", "advancedns", "smoke", "basic", "eip", "sg"])
def test_02_sys_vm_start(self):
def test_01_sys_vm_start(self):
"""Test system VM start
"""
@ -152,8 +93,6 @@ class TestSecStorageServices(cloudstackTestCase):
list_hosts_response = list_hosts(
self.apiclient,
type='Routing',
zoneid=self.zone.id,
podid=self.pod.id
)
self.assertEqual(
isinstance(list_hosts_response, list),
@ -176,8 +115,6 @@ class TestSecStorageServices(cloudstackTestCase):
# ListStoragePools shows all primary storage pools in UP state
list_storage_response = list_storage_pools(
self.apiclient,
zoneid=self.zone.id,
podid=self.pod.id
)
self.assertEqual(
isinstance(list_storage_response, list),
@ -197,58 +134,11 @@ class TestSecStorageServices(cloudstackTestCase):
"Check state of primary storage pools is Up or not"
)
# Secondary storage is added successfully
timeout = self.services["timeout"]
while True:
list_hosts_response = list_hosts(
self.apiclient,
type='SecondaryStorageVM',
zoneid=self.zone.id,
)
list_ssvm_response = list_ssvms(
self.apiclient,
systemvmtype='secondarystoragevm',
)
if not isinstance(list_hosts_response, list):
# Sleep to ensure Secondary storage is Up
time.sleep(int(self.services["sleep"]))
timeout = timeout - 1
elif timeout == 0 or isinstance(list_hosts_response, list):
break
self.assertEqual(
isinstance(list_hosts_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(list_hosts_response),
0,
"Check list Hosts response"
)
host_response = list_hosts_response[0]
#Check if host is Up and running
self.assertEqual(
host_response.state,
'Up',
"Check state of secondary storage"
)
self.debug("Checking SSVM status in zone: %s" % self.zone.id)
timeout = self.services["timeout"]
while True:
list_ssvm_response = list_ssvms(
self.apiclient,
systemvmtype='secondarystoragevm',
zoneid=self.zone.id,
)
if not isinstance(list_ssvm_response, list):
# Sleep to ensure SSVMs are Up and Running
time.sleep(int(self.services["sleep"]))
timeout = timeout - 1
elif timeout == 0 or isinstance(list_ssvm_response, list):
break
self.assertEqual(
isinstance(list_ssvm_response, list),
True,
@ -270,7 +160,7 @@ class TestSecStorageServices(cloudstackTestCase):
return
@attr(tags = ["advanced", "advancedns", "smoke", "basic", "eip", "sg"])
def test_03_sys_template_ready(self):
def test_02_sys_template_ready(self):
"""Test system templates are ready
"""
@ -279,100 +169,50 @@ class TestSecStorageServices(cloudstackTestCase):
# 1. wait for listTemplates to show all builtin templates downloaded and
# in Ready state
for k, v in self.services["hypervisors"].items():
hypervisors = {}
for zone in self.config.zones:
for pod in zone.pods:
for cluster in pod.clusters:
hypervisors[cluster.hypervisor] = "self"
self.debug("Downloading BUILTIN templates in zone: %s" %
self.zone.id)
list_template_response = list_templates(
self.apiclient,
hypervisor=v["hypervisor"],
zoneid=self.zone.id,
templatefilter=v["templatefilter"],
listall=True,
account='system',
domainid=self.domain.id
)
for zid in self.zones:
for k, v in hypervisors.items():
self.debug("Checking BUILTIN templates in zone: %s" %zid)
list_template_response = list_templates(
self.apiclient,
hypervisor=k,
zoneid=zid,
templatefilter=v,
listall=True,
account='system'
)
# Ensure all BUILTIN templates are downloaded
templateid = None
for template in list_template_response:
if template.templatetype == "BUILTIN":
templateid = template.id
# Ensure all BUILTIN templates are downloaded
templateid = None
for template in list_template_response:
if template.templatetype == "BUILTIN":
templateid = template.id
# Wait to start a downloading of template
time.sleep(self.services["sleep"])
while True and (templateid != None):
timeout = self.services["timeout"]
while True:
template_response = list_templates(
self.apiclient,
id=templateid,
zoneid=self.zone.id,
templatefilter=v["templatefilter"],
zoneid=zid,
templatefilter=v,
listall=True,
account='system',
domainid=self.domain.id
account='system'
)
if isinstance(template_response, list):
template = template_response[0]
break
elif timeout == 0:
raise Exception("List template API call failed.")
time.sleep(1)
timeout = timeout - 1
# If template is ready,
# template.status = Download Complete
# Downloading - x% Downloaded
# Error - Any other string
if template.status == 'Download Complete' :
break
elif 'Downloaded' not in template.status.split():
raise Exception
elif 'Downloaded' in template.status.split():
time.sleep(self.services["sleep"])
else:
raise Exception("ListTemplate API returned invalid list")
#Ensuring the template is in ready state
time.sleep(self.services["sleep"])
timeout = self.services["timeout"]
while True:
template_response = list_templates(
self.apiclient,
id=templateid,
zoneid=self.zone.id,
templatefilter=v["templatefilter"],
listall=True,
account='system',
domainid=self.domain.id
)
if isinstance(template_response, list):
template = template_response[0]
break
elif timeout == 0:
raise Exception("List template API call failed.")
time.sleep(1)
timeout = timeout - 1
self.assertEqual(
isinstance(template_response, list),
True,
"Check list response returns a valid list"
)
template = template_response[0]
if template.status == 'Download Complete':
self.debug("Template %s is ready in zone %s"%(template.templatetype, zid))
elif 'Downloaded' not in template.status.split():
self.debug("templates status is %s"%template.status)
self.assertEqual(
template.isready,
True,
"Check whether state of template is ready or not"
)
return
self.assertEqual(
template.isready,
True,
"Builtin template is not ready %s in zone %s"%(template.status, zid)
)
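The rewritten test above loops per zone and hypervisor, re-querying listTemplates until the BUILTIN template reports a ready state or the timeout budget is spent. A generic poll-with-timeout helper in the same spirit (illustrative only; get_template is a hypothetical lookup, not a marvin call):
# Retries a check at a fixed interval until it passes or the timeout expires.
import time
def wait_until(check, timeout=300, interval=60):
    """Return True as soon as check() is truthy, False if the timeout elapses."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if check():
            return True
        time.sleep(interval)
    return False
# Usage sketch with a hypothetical template lookup:
# ready = wait_until(lambda: get_template(zone_id).isready, timeout=600)
# assert ready, "Builtin template did not become ready in time"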

View File

@ -1,4 +1,22 @@
<?xml version="1.0"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
version="1.0">
<xsl:output method="html" doctype-public="-//W3C//DTD HTML 1.0 Transitional//EN"/>

View File

@ -1,4 +1,22 @@
<?xml version="1.0"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
version="1.0">
<xsl:output method="html" doctype-public="-//W3C//DTD HTML 1.0 Transitional//EN"/>

View File

@ -1,4 +1,22 @@
<?xml version="1.0"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
version="1.0">
<xsl:output method="html" doctype-public="-//W3C//DTD HTML 1.0 Transitional//EN"/>

View File

@ -1,3 +1,21 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
</div>
</div>
</div>

View File

@ -1,4 +1,22 @@
<?xml version="1.0"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
version="1.0">
<xsl:output method="html" doctype-public="-//W3C//DTD HTML 1.0 Transitional//EN"/>

View File

@ -0,0 +1,26 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Use the following rule for versioning:
# <cli major version>.<cloudstack minor version>.<cloudstack major version>
# Example: For CloudStack 4.1.x, CLI version should be 0.1.4
__version__ = "0.0.4"
try:
from cloudmonkey import *
except ImportError, e:
print e
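A worked example of the versioning rule stated in the comment above, assuming the format <cli major>.<cloudstack minor>.<cloudstack major>:
# Illustrative helper, not part of the package.
def cli_version(cloudstack_version, cli_major=0):
    major, minor = cloudstack_version.split(".")[:2]
    return "%d.%s.%s" % (cli_major, minor, major)
assert cli_version("4.0.0") == "0.0.4"   # matches __version__ above
assert cli_version("4.1.0") == "0.1.4"   # matches the example in the comment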

View File

@ -0,0 +1,382 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
try:
import atexit
import cmd
import clint
import codecs
import logging
import os
import pdb
import readline
import rlcompleter
import sys
import types
from clint.textui import colored
from ConfigParser import ConfigParser, SafeConfigParser
from marvin.cloudstackConnection import cloudConnection
from marvin.cloudstackException import cloudstackAPIException
from marvin.cloudstackAPI import *
from marvin import cloudstackAPI
except ImportError, e:
print "Import error in %s : %s" % (__name__, e)
import sys
sys.exit()
log_fmt = '%(asctime)s - %(filename)s:%(lineno)s - [%(levelname)s] %(message)s'
logger = logging.getLogger(__name__)
completions = cloudstackAPI.__all__
class CloudStackShell(cmd.Cmd):
intro = "☁ Apache CloudStack CLI. Type help or ? to list commands.\n"
ruler = "-"
config_file = os.path.expanduser('~/.cloudmonkey_config')
grammar = []
# datastructure {'list': {'users': ['listUsers', [params], docstring]}}
cache_verbs = {}
def __init__(self):
self.config_fields = {'host': 'localhost', 'port': '8080',
'apiKey': '', 'secretKey': '',
'prompt': '🙉 cloudmonkey> ', 'color': 'true',
'log_file':
os.path.expanduser('~/.cloudmonkey_log'),
'history_file':
os.path.expanduser('~/.cloudmonkey_history')}
if os.path.exists(self.config_file):
config = self.read_config()
else:
for key in self.config_fields.keys():
setattr(self, key, self.config_fields[key])
config = self.write_config()
print("Set your apiKey, secretKey, host, port, prompt, color, "
"log_file, history_file using the set command!")
for key in self.config_fields.keys():
setattr(self, key, config.get('CLI', key))
self.prompt += " " # Cosmetic fix for prompt
logging.basicConfig(filename=self.log_file,
level=logging.DEBUG, format=log_fmt)
self.logger = logging.getLogger(self.__class__.__name__)
cmd.Cmd.__init__(self)
# Update config if config_file does not exist
if not os.path.exists(self.config_file):
config = self.write_config()
# Fix autocompletion issue
if sys.platform == "darwin":
readline.parse_and_bind("bind ^I rl_complete")
else:
readline.parse_and_bind("tab: complete")
# Enable history support
try:
if os.path.exists(self.history_file):
readline.read_history_file(self.history_file)
atexit.register(readline.write_history_file, self.history_file)
except IOError:
print("Error: history support")
def read_config(self):
config = ConfigParser()
try:
with open(self.config_file, 'r') as cfg:
config.readfp(cfg)
for section in config.sections():
for option in config.options(section):
logger.debug("[%s] %s=%s" % (section, option,
config.get(section, option)))
except IOError, e:
self.print_shell("Error: config_file not found", e)
return config
def write_config(self):
config = ConfigParser()
config.add_section('CLI')
for key in self.config_fields.keys():
config.set('CLI', key, getattr(self, key))
with open(self.config_file, 'w') as cfg:
config.write(cfg)
return config
def emptyline(self):
pass
def set_grammar(self, grammar):
self.grammar = grammar
def print_shell(self, *args):
try:
for arg in args:
if isinstance(type(args), types.NoneType):
continue
if self.color == 'true':
if str(arg).count(self.ruler) == len(str(arg)):
print colored.green(arg),
elif 'type' in arg:
print colored.green(arg),
elif 'state' in arg:
print colored.yellow(arg),
elif 'id =' in arg:
print colored.cyan(arg),
elif 'name =' in arg:
print colored.magenta(arg),
elif 'Error' in arg:
print colored.red(arg),
elif ':' in arg:
print colored.blue(arg),
else:
print arg,
else:
print arg,
print
except Exception, e:
print colored.red("Error: "), e
# FIXME: Fix result processing and printing
def print_result(self, result, response, api_mod):
def print_result_as_list():
if result is None:
return
for node in result:
print_result_as_instance(node)
def print_result_as_instance(node):
for attribute in dir(response):
if "__" not in attribute:
attribute_value = getattr(node, attribute)
if isinstance(attribute_value, list):
self.print_shell("\n%s:" % attribute)
try:
self.print_result(attribute_value,
getattr(api_mod, attribute)(),
api_mod)
except AttributeError, e:
pass
elif attribute_value is not None:
self.print_shell("%s = %s" %
(attribute, attribute_value))
self.print_shell(self.ruler * 80)
if result is None:
return
if type(result) is types.InstanceType:
print_result_as_instance(result)
elif isinstance(result, list):
print_result_as_list()
elif isinstance(result, str):
print result
elif isinstance(type(result), types.NoneType):
print_result_as_instance(result)
elif not (str(result) is None):
self.print_shell(result)
def do_quit(self, s):
"""
Quit Apache CloudStack CLI
"""
self.print_shell("Bye!")
return True
def do_shell(self, args):
"""
Execute shell commands using shell <command> or !<command>
Example: !ls or shell ls
"""
os.system(args)
def make_request(self, command, requests={}):
conn = cloudConnection(self.host, port=int(self.port),
apiKey=self.apiKey, securityKey=self.secretKey,
logging=logging.getLogger("cloudConnection"))
try:
response = conn.make_request(command, requests)
except cloudstackAPIException, e:
self.print_shell("API Error", e)
return None
return response
def get_api_module(self, api_name, api_class_strs=[]):
try:
api_mod = __import__("marvin.cloudstackAPI.%s" % api_name,
globals(), locals(), api_class_strs, -1)
except ImportError, e:
self.print_shell("Error: API %s not found!" % e)
return None
return api_mod
def default(self, args):
args = args.strip().split(" ")
api_name = args[0]
try:
api_cmd_str = "%sCmd" % api_name
api_rsp_str = "%sResponse" % api_name
api_mod = self.get_api_module(api_name, [api_cmd_str, api_rsp_str])
api_cmd = getattr(api_mod, api_cmd_str)
api_rsp = getattr(api_mod, api_rsp_str)
except AttributeError, e:
self.print_shell("Error: API %s not found!" % e)
return
command = api_cmd()
response = api_rsp()
#FIXME: Parsing logic
args_dict = dict(map(lambda x: x.split("="),
args[1:])[x] for x in range(len(args) - 1))
for attribute in dir(command):
if attribute in args_dict:
setattr(command, attribute, args_dict[attribute])
result = self.make_request(command, response)
try:
self.print_result(result, response, api_mod)
except Exception as e:
self.print_shell("🙈 Error on parsing and printing", e)
def cache_verb_miss(self, verb):
completions_found = filter(lambda x: x.startswith(verb), completions)
self.cache_verbs[verb] = {}
for api_name in completions_found:
try:
api_cmd_str = "%sCmd" % api_name
api_mod = self.get_api_module(api_name, [api_cmd_str])
api_cmd = getattr(api_mod, api_cmd_str)
doc = api_mod.__doc__
except AttributeError, e:
self.print_shell("Error: API attribute %s not found!" % e)
params = filter(lambda x: '__' not in x and 'required' not in x,
dir(api_cmd()))
api_name_lower = api_name.replace(verb, '').lower()
self.cache_verbs[verb][api_name_lower] = [api_name, params, doc]
def completedefault(self, text, line, begidx, endidx):
partitions = line.partition(" ")
verb = partitions[0]
rline = partitions[2].partition(" ")
subject = rline[0]
separator = rline[1]
params = rline[2]
if verb not in self.grammar:
return []
autocompletions = []
search_string = ""
if verb not in self.cache_verbs:
self.cache_verb_miss(verb)
if separator != " ": # Complete verb subjects
autocompletions = self.cache_verbs[verb].keys()
search_string = subject
else: # Complete subject params
autocompletions = self.cache_verbs[verb][subject][1]
search_string = text
return [s for s in autocompletions if s.startswith(search_string)]
def do_api(self, args):
"""
Make raw API calls. Syntax: api <apiName> <args>=<values>. Example:
api listAccounts listall=true
"""
if len(args) > 0:
return self.default(args)
else:
self.print_shell("Please use a valid syntax")
def complete_api(self, text, line, begidx, endidx):
mline = line.partition(" ")[2]
offs = len(mline) - len(text)
return [s[offs:] for s in completions if s.startswith(mline)]
def do_set(self, args):
"""
Set config for CloudStack CLI. Available options are:
host, port, apiKey, secretKey, log_file, history_file
"""
args = args.split(' ')
if len(args) == 2:
key, value = args
# Note: keys and fields should have same names
setattr(self, key, value)
self.write_config()
else:
self.print_shell("Please use the syntax: set valid-key value")
def complete_set(self, text, line, begidx, endidx):
mline = line.partition(" ")[2]
offs = len(mline) - len(text)
return [s[offs:] for s in
['host', 'port', 'apiKey', 'secretKey', 'prompt', 'color',
'log_file', 'history_file'] if s.startswith(mline)]
def main():
# Add verbs in grammar
grammar = ['create', 'list', 'delete', 'update',
'enable', 'disable', 'add', 'remove', 'attach', 'detach',
'assign', 'authorize', 'change', 'register',
'start', 'restart', 'reboot', 'stop', 'reconnect',
'cancel', 'destroy', 'revoke',
'copy', 'extract', 'migrate', 'restore',
'get', 'prepare', 'deploy', 'upload']
self = CloudStackShell
for rule in grammar:
def add_grammar(rule):
def grammar_closure(self, args):
if not rule in self.cache_verbs:
self.cache_verb_miss(rule)
try:
args_partition = args.partition(" ")
res = self.cache_verbs[rule][args_partition[0]]
except KeyError, e:
self.print_shell("Error: no such command on %s" % rule)
return
if '--help' in args:
self.print_shell(res[2])
return
self.default(res[0] + " " + args_partition[2])
return grammar_closure
grammar_handler = add_grammar(rule)
grammar_handler.__doc__ = "%ss resources" % rule.capitalize()
grammar_handler.__name__ = 'do_' + rule
setattr(self, grammar_handler.__name__, grammar_handler)
shell = CloudStackShell()
shell.set_grammar(grammar)
if len(sys.argv) > 1:
shell.onecmd(' '.join(sys.argv[1:]))
else:
shell.cmdloop()
if __name__ == "__main__":
main()
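main() above builds one do_<verb> handler per grammar verb through a closure and attaches it to the shell class, so "list users ..." is rewritten into a raw "listUsers ..." API call. A stripped-down sketch of that dynamic dispatch on a plain cmd.Cmd (the verbs and the subject-joining rule here are simplified assumptions, not the cloudmonkey cache_verbs logic):
# Illustrative only: one do_<verb> handler is generated per verb and
# attached to the Cmd subclass before instantiation.
import cmd
class MiniShell(cmd.Cmd):
    prompt = "mini> "
    def default(self, line):
        print("would call API: " + line)
    def do_quit(self, line):
        return True
def attach_verb(cls, verb):
    def handler(self, args):
        # e.g. "list users listall=true" -> default("listUsers listall=true")
        subject, _, rest = args.partition(" ")
        self.default(verb + subject.capitalize() + " " + rest)
    handler.__name__ = "do_" + verb
    handler.__doc__ = "%ss resources" % verb.capitalize()
    setattr(cls, handler.__name__, handler)
for verb in ["list", "create", "delete"]:
    attach_verb(MiniShell, verb)
if __name__ == "__main__":
    MiniShell().onecmd("list users listall=true")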

75
tools/cli/pom.xml Normal file
View File

@ -0,0 +1,75 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>cloud-cli</artifactId>
<name>Apache CloudStack Developer Tools: cloudmonkey cli</name>
<packaging>pom</packaging>
<parent>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloudstack</artifactId>
<version>4.1.0-SNAPSHOT</version>
<relativePath>../../pom.xml</relativePath>
</parent>
<build>
<defaultGoal>install</defaultGoal>
<plugins>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>exec-maven-plugin</artifactId>
<version>1.2.1</version>
<executions>
<execution>
<id>compile</id>
<phase>compile</phase>
<goals>
<goal>exec</goal>
</goals>
<configuration>
<workingDirectory>${basedir}</workingDirectory>
<executable>cp</executable>
<arguments>
<argument>-rv</argument>
<argument>${basedir}/../marvin/marvin</argument>
<argument>${basedir}/cloudmonkey</argument>
</arguments>
</configuration>
</execution>
<execution>
<id>package</id>
<phase>compile</phase>
<goals>
<goal>exec</goal>
</goals>
<configuration>
<workingDirectory>${basedir}</workingDirectory>
<executable>python</executable>
<arguments>
<argument>setup.py</argument>
<argument>sdist</argument>
</arguments>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

59
tools/cli/setup.py Normal file
View File

@ -0,0 +1,59 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
try:
from setuptools import setup, find_packages
except ImportError:
from distribute_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
from cloudmonkey import __version__
name = 'cloudmonkey'
version = __version__
setup(
name = name,
version = version,
author = "The Apache CloudStack Team",
author_email = "cloudstack-dev@incubator.apache.org",
maintainer = "Rohit Yadav",
maintainer_email = "bhaisaab@apache.org",
url = "http://incubator.apache.org/cloudstack",
description = "Command Line Interface for Apache CloudStack",
license = 'ASL 2.0',
packages=find_packages(),
install_requires=['clint'],
include_package_data = True,
zip_safe = False,
classifiers = [
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: End Users/Desktop",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Software Development :: Testing",
"Topic :: Software Development :: Interpreters",
"Topic :: Utilities",
],
entry_points="""
[console_scripts]
cloudmonkey = cloudmonkey:main
""",
)
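The console_scripts entry point above makes setuptools generate a cloudmonkey wrapper script that, roughly, does the following (a sketch; it assumes the installed package re-exports main, as the __init__ shown earlier does via its star import):
# Approximation of the generated "cloudmonkey" wrapper, for illustration.
import sys
from cloudmonkey import main  # assumes the cloudmonkey package is installed
if __name__ == "__main__":
    sys.exit(main())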

View File

@ -69,16 +69,21 @@ class cloudConnection(object):
try:
self.connection = urllib2.urlopen("http://%s:%d/client/api?%s"%(self.mgtSvr, self.port, requestUrl))
self.logging.debug("sending GET request: %s"%requestUrl)
if self.logging is not None:
self.logging.debug("sending GET request: %s"%requestUrl)
response = self.connection.read()
self.logging.info("got response: %s"%response)
if self.logging is not None:
self.logging.info("got response: %s"%response)
except IOError, e:
if hasattr(e, 'reason'):
self.logging.critical("failed to reach %s because of %s"%(self.mgtSvr, e.reason))
if self.logging is not None:
self.logging.critical("failed to reach %s because of %s"%(self.mgtSvr, e.reason))
elif hasattr(e, 'code'):
self.logging.critical("server returned %d error code"%e.code)
if self.logging is not None:
self.logging.critical("server returned %d error code"%e.code)
except httplib.HTTPException, h:
self.logging.debug("encountered http Exception %s"%h.args)
if self.logging is not None:
self.logging.debug("encountered http Exception %s"%h.args)
if self.retries > 0:
self.retries = self.retries - 1
self.make_request_with_auth(command, requests)
@ -95,9 +100,11 @@ class cloudConnection(object):
requestUrl = "&".join(["=".join([request[0], urllib.quote_plus(str(request[1]))]) for request in requests])
self.connection = urllib2.urlopen("http://%s:%d/client/api?%s"%(self.mgtSvr, self.port, requestUrl))
self.logging.debug("sending GET request without auth: %s"%requestUrl)
if self.logging is not None:
self.logging.debug("sending GET request without auth: %s"%requestUrl)
response = self.connection.read()
self.logging.info("got response: %s"%response)
if self.logging is not None:
self.logging.info("got response: %s"%response)
return response
def pollAsyncJob(self, jobId, response):
@ -114,7 +121,8 @@ class cloudConnection(object):
return asyncResonse
time.sleep(5)
self.logging.debug("job: %s still processing, will timeout in %ds"%(jobId, timeout))
if self.logging is not None:
self.logging.debug("job: %s still processing, will timeout in %ds"%(jobId, timeout))
timeout = timeout - 5
raise cloudstackException.cloudstackAPIException("asyncquery", "Async job timeout %s"%jobId)
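The hunks above wrap every logger call in an explicit "if self.logging is not None" guard so the connection also works when no logger is passed in. One common alternative, sketched below for comparison rather than as what this commit does, is to fall back to a logger with a NullHandler so the calls are always safe:
# Illustrative only: a no-op fallback logger instead of per-call None checks.
import logging
class MiniConnection(object):
    def __init__(self, logger=None):
        if logger is None:
            logger = logging.getLogger("cloudConnection.null")
            logger.addHandler(logging.NullHandler())
        self.logging = logger
    def make_request(self, url):
        self.logging.debug("sending GET request: %s" % url)  # never raises
        return "response"
MiniConnection().make_request("http://localhost:8080/client/api?command=listZones")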

View File

@ -6816,6 +6816,12 @@ label.error {
width: 98%;
}
.multi-wizard.zone-wizard .multi-edit table th,
.multi-wizard.zone-wizard .multi-edit table td {
min-width: 97px;
max-width: 97px;
}
.multi-wizard.zone-wizard .multi-edit .data {
width: 102%;
float: left;
@ -7181,6 +7187,10 @@ div.panel.ui-dialog div.list-view div.fixed-header {
border-top: none;
}
.multi-edit table th {
min-width: 120px;
}
.detail-group .multi-edit table td {
border-left: 1px solid #CDCCCC;
}
@ -7189,6 +7199,11 @@ div.panel.ui-dialog div.list-view div.fixed-header {
width: 70%;
}
.detail-view .multi-edit select {
width: 93%;
font-size: 10px;
}
.multi-edit input {
width: 85%;
}
@ -7202,9 +7217,10 @@ div.panel.ui-dialog div.list-view div.fixed-header {
}
.multi-edit .range input {
width: 70px;
width: 41px;
margin-left: 13px;
position: relative;
float: left;
}
.multi-edit .range label {
@ -7266,8 +7282,9 @@ div.panel.ui-dialog div.list-view div.fixed-header {
border-radius: 5px;
border-radius: 5px 5px 5px 5px;
width: 74px;
text-indent: 0px;
text-align: center;
padding: 6px 9px 4px 0px;
padding: 6px 0px 4px;
background: url(../images/bg-gradients.png) repeat-x 0px -220px;
/*+placement:shift 4px 0px;*/
position: relative;
@ -7389,10 +7406,18 @@ div.panel.ui-dialog div.list-view div.fixed-header {
border-left: none;
border-right: 1px solid #CFC9C9;
height: 15px;
overflow: hidden;
overflow: auto;
padding-right: 0;
}
.multi-edit .data .data-body .data-item > table tbody tr td span {
overflow-x: auto;
overflow-y: hidden;
max-width: 88px;
display: block;
float: left;
}
.multi-edit .data .data-body .data-item table tbody tr td.blank {
}
@ -7401,6 +7426,7 @@ div.panel.ui-dialog div.list-view div.fixed-header {
}
.multi-edit .data .data-body .data-item table tbody tr td.name span {
width: 53px;
color: #4C5D78;
font-weight: bold;
}
@ -7437,6 +7463,7 @@ div.panel.ui-dialog div.list-view div.fixed-header {
.multi-edit .data .data-body .data-item tr td .expand {
width: 14px;
height: 15px;
display: block;
cursor: pointer;
background: #FFFFFF url(../images/sprites.png) -541px -499px;
border: 1px solid #D0D0D0;
@ -8025,7 +8052,7 @@ div.panel.ui-dialog div.list-view div.fixed-header {
max-width: 98px;
max-height: 21px;
padding: 7px;
font-size: 14px;
font-size: 10px;
position: absolute;
overflow: hidden;
color: #485563;
@ -9525,7 +9552,8 @@ div.panel.ui-dialog div.list-view div.fixed-header {
.tooltip-box {
width: 15%;
height: auto;
display: inline-block, padding: 4px;
display: inline-block;
padding: 4px;
background: #FFFFFF;
border: 1px solid #BEB8B8;
padding: 10px;
@ -10331,6 +10359,13 @@ div.ui-dialog div.acl div.multi-edit div.data div.data-body div.data-item table
max-height: 600px;
}
div.container div.panel div#details-tab-network.detail-group div div.multi-edit table.multi-edit tbody tr td,
div.container div.panel div#details-tab-network.detail-group div div.multi-edit table.multi-edit thead tr th {
min-width: 80px;
max-width: 80px;
font-size: 10px;
}
.ui-dialog div.autoscaler .detail-actions {
}

View File

@ -787,24 +787,19 @@
firstname: { label: 'label.first.name' },
lastname: { label: 'label.last.name' }
},
dataProvider: function(args) {
var array1 = [];
if(args.filterBy != null) {
if(args.filterBy.search != null && args.filterBy.search.by != null && args.filterBy.search.value != null) {
switch(args.filterBy.search.by) {
case "name":
if(args.filterBy.search.value.length > 0)
array1.push("&keyword=" + args.filterBy.search.value);
break;
}
}
}
dataProvider: function(args) {
var accountObj = args.context.accounts[0];
if(isAdmin() || isDomainAdmin()) {
var data = {
domainid: accountObj.domainid,
account: accountObj.name
};
listViewDataProvider(args, data);
$.ajax({
url: createURL("listUsers&domainid=" + accountObj.domainid + "&account=" + todb(accountObj.name) + "&page=" + args.page + "&pagesize=" + pageSize + array1.join("")),
dataType: "json",
url: createURL('listUsers'),
data: data,
success: function(json) {
args.response.success({
actionFilter: userActionfilter,
@ -893,29 +888,39 @@
action: function(args) {
var accountObj = args.context.accounts[0];
var array1 = [];
array1.push("&username=" + todb(args.data.username));
var data = {
username: args.data.username
};
var password = args.data.password;
if (md5Hashed)
password = $.md5(password);
else
password = todb(password);
array1.push("&password=" + password);
array1.push("&email=" + todb(args.data.email));
array1.push("&firstname=" + todb(args.data.firstname));
array1.push("&lastname=" + todb(args.data.lastname));
if(args.data.timezone != null && args.data.timezone.length > 0)
array1.push("&timezone=" + todb(args.data.timezone));
array1.push("&domainid=" + accountObj.domainid);
array1.push("&account=" + todb(accountObj.name));
array1.push("&accounttype=" + accountObj.accounttype);
if (md5Hashed) {
password = $.md5(password);
}
$.extend(data, {
password: password
});
$.extend(data, {
email: args.data.email,
firstname: args.data.firstname,
lastname: args.data.lastname
});
if(args.data.timezone != null && args.data.timezone.length > 0) {
$.extend(data, {
timezone: args.data.timezone
});
}
$.extend(data, {
domainid: accountObj.domainid,
account: accountObj.name,
accounttype: accountObj.accounttype
});
$.ajax({
url: createURL("createUser" + array1.join("")),
dataType: "json",
url: createURL('createUser'),
data: data,
success: function(json) {
var item = json.createuserresponse.user;
args.response.success({data: item});
@ -942,15 +947,17 @@
edit: {
label: 'label.edit',
action: function(args) {
var array1 = [];
array1.push("&username=" + todb(args.data.username));
array1.push("&email=" + todb(args.data.email));
array1.push("&firstname=" + todb(args.data.firstname));
array1.push("&lastname=" + todb(args.data.lastname));
array1.push("&timezone=" + todb(args.data.timezone));
var data = {
id: args.context.users[0].id,
username: args.data.username,
email: args.data.email,
firstname: args.data.firstname,
lastname: args.data.lastname,
timezone: args.data.timezone
};
$.ajax({
url: createURL("updateUser&id=" + args.context.users[0].id + array1.join("")),
dataType: "json",
url: createURL('updateUser'),
data: data,
success: function(json) {
var item = json.updateuserresponse.user;
args.response.success({data:item});
@ -993,11 +1000,15 @@
var password = args.data.newPassword;
if (md5Hashed)
password = $.md5(password);
else
password = todb(password);
var data = {
id: args.context.users[0].id,
password: password
};
$.ajax({
url: createURL("updateUser&id=" + args.context.users[0].id + "&password=" + password),
dataType: "json",
url: createURL('updateUser'),
data: data,
async: true,
success: function(json) {
args.response.success({data: json.updateuserresponse.user});
@ -1022,10 +1033,12 @@
}
},
action: function(args) {
var data = {
id: args.context.users[0].id
};
$.ajax({
url: createURL("registerUserKeys&id=" + args.context.users[0].id),
dataType: "json",
async: true,
url: createURL('registerUserKeys'),
data: data,
success: function(json) {
args.response.success({data: json.registeruserkeysresponse.userkeys});
}
@ -1049,10 +1062,12 @@
}
},
action: function(args) {
var data = {
id: args.context.users[0].id
};
$.ajax({
url: createURL("disableUser&id=" + args.context.users[0].id),
dataType: "json",
async: true,
url: createURL('disableUser'),
data: data,
success: function(json) {
var jid = json.disableuserresponse.jobid;
args.response.success(
@ -1086,10 +1101,12 @@
}
},
action: function(args) {
var data = {
id: args.context.users[0].id
};
$.ajax({
url: createURL("enableUser&id=" + args.context.users[0].id),
dataType: "json",
async: true,
url: createURL('enableUser'),
data: data,
success: function(json) {
args.response.success({data: json.enableuserresponse.user});
},
@ -1116,10 +1133,12 @@
}
},
action: function(args) {
var data = {
id: args.context.users[0].id
};
$.ajax({
url: createURL("deleteUser&id=" + args.context.users[0].id),
dataType: "json",
async: true,
url: createURL('deleteUser'),
data: data,
success: function(json) {
args.response.success();
}

View File

@ -161,34 +161,51 @@
},
action: function(args) {
var array1 = [];
array1.push("&name=" + args.data.name);
array1.push("&displaytext=" + todb(args.data.description));
array1.push("&storageType=" + todb(args.data.storageType));
array1.push("&cpuNumber=" + args.data.cpuNumber);
array1.push("&cpuSpeed="+ args.data.cpuSpeed);
array1.push("&memory=" + args.data.memory);
var data = {
issystem: false,
name: args.data.name,
displaytext: args.data.description,
storageType: args.data.storageType,
cpuNumber: args.data.cpuNumber,
cpuSpeed: args.data.cpuSpeed,
memory: args.data.memory
};
if(args.data.networkRate != null && args.data.networkRate.length > 0) {
$.extend(data, {
networkrate: args.data.networkRate
});
}
if(args.data.networkRate != null && args.data.networkRate.length > 0)
array1.push("&networkrate=" + args.data.networkRate);
array1.push("&offerha=" + (args.data.offerHA == "on"));
if(args.data.storageTags != null && args.data.storageTags.length > 0)
array1.push("&tags=" + todb(args.data.storageTags));
if(args.data.hostTags != null && args.data.hostTags.length > 0)
array1.push("&hosttags=" + todb(args.data.hostTags));
array1.push("&limitcpuuse=" + (args.data.cpuCap == "on"));
if(args.$form.find('.form-item[rel=domainId]').css("display") != "none")
array1.push("&domainid=" + args.data.domainId);
$.extend(data, {
offerha: (args.data.offerHA == "on")
});
if(args.data.storageTags != null && args.data.storageTags.length > 0) {
$.extend(data, {
tags: args.data.storageTags
});
}
if(args.data.hostTags != null && args.data.hostTags.length > 0) {
$.extend(data, {
hosttags: args.data.hostTags
});
}
$.extend(data, {
limitcpuuse: (args.data.cpuCap == "on")
});
if(args.$form.find('.form-item[rel=domainId]').css("display") != "none") {
$.extend(data, {
domainid: args.data.domainId
});
}
$.ajax({
url: createURL("createServiceOffering&issystem=false"+array1.join("")),
dataType: "json",
async: true,
url: createURL('createServiceOffering'),
data: data,
success: function(json) {
var item = json.createserviceofferingresponse.serviceoffering;
args.response.success({data: item});
@ -208,22 +225,16 @@
},
dataProvider: function(args) {
var array1 = [];
if(args.filterBy != null) {
if(args.filterBy.search != null && args.filterBy.search.by != null && args.filterBy.search.value != null) {
switch(args.filterBy.search.by) {
case "name":
if(args.filterBy.search.value.length > 0)
array1.push("&keyword=" + args.filterBy.search.value);
break;
}
}
}
var data = {};
listViewDataProvider(args, data);
$.extend(data, {
issystem: false
});
$.ajax({
url: createURL("listServiceOfferings&issystem=false&page=" + args.page + "&pagesize=" + pageSize + array1.join("")),
dataType: "json",
async: true,
url: createURL('listServiceOfferings'),
data: data,
success: function(json) {
var items = json.listserviceofferingsresponse.serviceoffering;
args.response.success({
@ -243,12 +254,14 @@
edit: {
label: 'label.edit',
action: function(args) {
var array1 = [];
array1.push("&name=" + todb(args.data.name));
array1.push("&displaytext=" + todb(args.data.displaytext));
var data = {
id: args.context.serviceOfferings[0].id,
name: args.data.name,
displaytext: args.data.displaytext
};
$.ajax({
url: createURL("updateServiceOffering&id=" + args.context.serviceOfferings[0].id + array1.join("")),
dataType: "json",
url: createURL('updateServiceOffering'),
data: data,
success: function(json) {
var item = json.updateserviceofferingresponse.serviceoffering;
args.response.success({data: item});
@ -271,9 +284,12 @@
}
},
action: function(args) {
var data = {
id: args.context.serviceOfferings[0].id
};
$.ajax({
url: createURL("deleteServiceOffering&id=" + args.context.serviceOfferings[0].id),
dataType: "json",
url: createURL('deleteServiceOffering'),
data: data,
async: true,
success: function(json) {
args.response.success();
@ -340,10 +356,14 @@
}
],
dataProvider: function(args) {
dataProvider: function(args) {
var data = {
issystem: false,
id: args.context.serviceOfferings[0].id
};
$.ajax({
url: createURL("listServiceOfferings&issystem=false&id=" + args.context.serviceOfferings[0].id),
dataType: "json",
url: createURL('listServiceOfferings'),
data: data,
async: true,
success: function(json) {
var item = json.listserviceofferingsresponse.serviceoffering[0];
@ -508,35 +528,52 @@
},
action: function(args) {
var array1 = [];
array1.push("&name=" + args.data.name);
array1.push("&displaytext=" + todb(args.data.description));
array1.push("&systemvmtype=" + todb(args.data.systemvmtype));
array1.push("&storageType=" + todb(args.data.storageType));
array1.push("&cpuNumber=" + args.data.cpuNumber);
array1.push("&cpuSpeed="+ args.data.cpuSpeed);
array1.push("&memory=" + args.data.memory);
var data = {
issystem: true,
name: args.data.name,
displaytext: args.data.description,
systemvmtype: args.data.systemvmtype,
storageType: args.data.storageType,
cpuNumber: args.data.cpuNumber,
cpuSpeed: args.data.cpuSpeed,
memory: args.data.memory
};
if(args.data.networkRate != null && args.data.networkRate.length > 0)
array1.push("&networkrate=" + args.data.networkRate);
if(args.data.networkRate != null && args.data.networkRate.length > 0) {
$.extend(data, {
networkrate: args.data.networkRate
});
}
array1.push("&offerha=" + (args.data.offerHA == "on"));
$.extend(data, {
offerha: (args.data.offerHA == "on")
});
if(args.data.storageTags != null && args.data.storageTags.length > 0) {
$.extend(data, {
tags: args.data.storageTags
});
}
if(args.data.storageTags != null && args.data.storageTags.length > 0)
array1.push("&tags=" + todb(args.data.storageTags));
if(args.data.hostTags != null && args.data.hostTags.length > 0) {
$.extend(data, {
hosttags: args.data.hostTags
});
}
if(args.data.hostTags != null && args.data.hostTags.length > 0)
array1.push("&hosttags=" + todb(args.data.hostTags));
array1.push("&limitcpuuse=" + (args.data.cpuCap == "on"));
if(args.$form.find('.form-item[rel=domainId]').css("display") != "none")
array1.push("&domainid=" + args.data.domainId);
$.extend(data, {
limitcpuuse: (args.data.cpuCap == "on")
});
if(args.$form.find('.form-item[rel=domainId]').css("display") != "none") {
$.extend(data, {
domainid: args.data.domainId
});
}
$.ajax({
url: createURL("createServiceOffering&issystem=true"+array1.join("")),
dataType: "json",
async: true,
url: createURL('createServiceOffering'),
data: data,
success: function(json) {
var item = json.createserviceofferingresponse.serviceoffering;
args.response.success({data: item});
@ -556,22 +593,16 @@
},
dataProvider: function(args) {
var array1 = [];
if(args.filterBy != null) {
if(args.filterBy.search != null && args.filterBy.search.by != null && args.filterBy.search.value != null) {
switch(args.filterBy.search.by) {
case "name":
if(args.filterBy.search.value.length > 0)
array1.push("&keyword=" + args.filterBy.search.value);
break;
}
}
}
var data = {};
listViewDataProvider(args, data);
$.extend(data, {
issystem: true
});
$.ajax({
url: createURL("listServiceOfferings&issystem=true&page=" + args.page + "&pagesize=" + pageSize + array1.join("")),
dataType: "json",
async: true,
url: createURL('listServiceOfferings'),
data: data,
success: function(json) {
var items = json.listserviceofferingsresponse.serviceoffering;
args.response.success({data:items});
@ -588,12 +619,14 @@
edit: {
label: 'label.edit',
action: function(args) {
var array1 = [];
array1.push("&name=" + todb(args.data.name));
array1.push("&displaytext=" + todb(args.data.displaytext));
var data = {
id: args.context.systemServiceOfferings[0].id,
name: args.data.name,
displaytext: args.data.displaytext
};
$.ajax({
url: createURL("updateServiceOffering&id=" + args.context.systemServiceOfferings[0].id + array1.join("")),
dataType: "json",
url: createURL('updateServiceOffering'),
data: data,
success: function(json) {
var item = json.updateserviceofferingresponse.serviceoffering;
args.response.success({data: item});
@ -616,10 +649,12 @@
}
},
action: function(args) {
var data = {
id: args.context.systemServiceOfferings[0].id
};
$.ajax({
url: createURL("deleteServiceOffering&id=" + args.context.systemServiceOfferings[0].id),
dataType: "json",
async: true,
url: createURL('deleteServiceOffering'),
data: data,
success: function(json) {
args.response.success();
},
@ -703,11 +738,14 @@
}
],
dataProvider: function(args) {
dataProvider: function(args) {
var data = {
issystem: true,
id: args.context.systemServiceOfferings[0].id
};
$.ajax({
url: createURL("listServiceOfferings&issystem=true&id=" + args.context.systemServiceOfferings[0].id),
dataType: "json",
async: true,
url: createURL('listServiceOfferings'),
data: data,
success: function(json) {
var item = json.listserviceofferingsresponse.serviceoffering[0];
args.response.success({
@ -750,22 +788,12 @@
reorder: cloudStack.api.actions.sort('updateDiskOffering', 'diskOfferings'),
dataProvider: function(args) {
var array1 = [];
if(args.filterBy != null) {
if(args.filterBy.search != null && args.filterBy.search.by != null && args.filterBy.search.value != null) {
switch(args.filterBy.search.by) {
case "name":
if(args.filterBy.search.value.length > 0)
array1.push("&keyword=" + args.filterBy.search.value);
break;
}
}
}
var data = {};
listViewDataProvider(args, data);
$.ajax({
url: createURL("listDiskOfferings&page=" + args.page + "&pagesize=" + pageSize + array1.join("")),
dataType: "json",
async: true,
url: createURL('listDiskOfferings'),
data: data,
success: function(json) {
var items = json.listdiskofferingsresponse.diskoffering;
args.response.success({data:items});
@ -860,25 +888,35 @@
},
action: function(args) {
var array1 = [];
array1.push("&name=" + args.data.name);
array1.push("&displaytext=" + todb(args.data.description));
var data = {
isMirrored: false,
name: args.data.name,
displaytext: args.data.description,
storageType: args.data.storageType,
customized: (args.data.isCustomized=="on")
};
if(args.$form.find('.form-item[rel=disksize]').css("display") != "none") {
$.extend(data, {
disksize: args.data.disksize
});
}
array1.push("&storageType=" + todb(args.data.storageType));
array1.push("&customized=" + (args.data.isCustomized=="on"));
if(args.$form.find('.form-item[rel=disksize]').css("display") != "none")
array1.push("&disksize=" + args.data.disksize);
if(args.data.tags != null && args.data.tags.length > 0) {
$.extend(data, {
tags: args.data.tags
});
}
if(args.data.tags != null && args.data.tags.length > 0)
array1.push("&tags=" + todb(args.data.tags));
if(args.$form.find('.form-item[rel=domainId]').css("display") != "none")
array1.push("&domainid=" + args.data.domainId);
if(args.$form.find('.form-item[rel=domainId]').css("display") != "none") {
$.extend(data, {
domainid: args.data.domainId
});
}
$.ajax({
url: createURL("createDiskOffering&isMirrored=false" + array1.join("")),
dataType: "json",
async: true,
url: createURL('createDiskOffering'),
data: data,
success: function(json) {
var item = json.creatediskofferingresponse.diskoffering;
args.response.success({data: item});
@ -903,12 +941,14 @@
edit: {
label: 'label.edit',
action: function(args) {
var array1 = [];
array1.push("&name=" + todb(args.data.name));
array1.push("&displaytext=" + todb(args.data.displaytext));
var data = {
id: args.context.diskOfferings[0].id,
name: args.data.name,
displaytext: args.data.displaytext
};
$.ajax({
url: createURL("updateDiskOffering&id=" + args.context.diskOfferings[0].id + array1.join("")),
dataType: "json",
url: createURL('updateDiskOffering'),
data: data,
success: function(json) {
var item = json.updatediskofferingresponse.diskoffering;
args.response.success({data: item});
@ -931,10 +971,12 @@
}
},
action: function(args) {
var data = {
id: args.context.diskOfferings[0].id
};
$.ajax({
url: createURL("deleteDiskOffering&id=" + args.context.diskOfferings[0].id),
dataType: "json",
async: true,
url: createURL('deleteDiskOffering'),
data: data,
success: function(json) {
args.response.success();
},
@ -990,10 +1032,12 @@
],
dataProvider: function(args) {
var data = {
id: args.context.diskOfferings[0].id
};
$.ajax({
url: createURL("listDiskOfferings&id=" + args.context.diskOfferings[0].id),
dataType: "json",
async: true,
url: createURL('listDiskOfferings'),
data: data,
success: function(json) {
var item = json.listdiskofferingsresponse.diskoffering[0];
args.response.success({
@ -1028,26 +1072,12 @@
},
dataProvider: function(args) {
var array1 = [];
if(args.filterBy != null) {
if(args.filterBy.search != null && args.filterBy.search.by != null && args.filterBy.search.value != null) {
switch(args.filterBy.search.by) {
case "name":
if(args.filterBy.search.value.length > 0)
array1.push("&keyword=" + args.filterBy.search.value);
break;
}
}
}
var data = {};
listViewDataProvider(args, data);
$.ajax({
url: createURL('listNetworkOfferings' + array1.join("")),
data: {
page: args.page,
pagesize: pageSize
},
dataType: "json",
async: true,
url: createURL('listNetworkOfferings'),
data: data,
success: function(json) {
var items = json.listnetworkofferingsresponse.networkoffering;
@ -1743,13 +1773,16 @@
edit: {
label: 'label.edit',
action: function(args) {
var array1 = [];
array1.push("&name=" + todb(args.data.name));
array1.push("&displaytext=" + todb(args.data.displaytext));
array1.push("&availability=" + args.data.availability);
var data = {
id: args.context.networkOfferings[0].id,
name: args.data.name,
displaytext: args.data.displaytext,
availability: args.data.availability
};
$.ajax({
url: createURL("updateNetworkOffering&id=" + args.context.networkOfferings[0].id + array1.join("")),
dataType: "json",
url: createURL('updateNetworkOffering'),
data: data,
success: function(json) {
//if availability is being updated from Required to Optional
if(args.context.networkOfferings[0].availability == "Required" && args.data.availability == "Optional")

View File

@ -193,16 +193,20 @@
label: 'label.add.domain',
action: function(args) {
var array1 = [];
array1.push("&parentdomainid=" + args.context.domains[0].id);
array1.push("&name=" + todb(args.data.name));
if(args.data.networkdomain != null && args.data.networkdomain.length > 0)
array1.push("&networkdomain=" + todb(args.data.networkdomain));
var data = {
parentdomainid: args.context.domains[0].id,
name: args.data.name
};
if(args.data.networkdomain != null && args.data.networkdomain.length > 0) {
$.extend(data, {
networkdomain: args.data.networkdomain
});
}
$.ajax({
url: createURL("createDomain" + array1.join("")),
dataType: "json",
async: false,
url: createURL('createDomain'),
data: data,
success: function(json) {
var item = json.createdomainresponse.domain;
args.response.success({data: item});

View File

@ -30,16 +30,14 @@
actions: {
edit: {
label: 'label.change.value',
action: function(args) {
var name = args.data.jsonObj.name;
var value = args.data.value;
action: function(args) {
var data = {
name: args.data.jsonObj.name,
value: args.data.value
};
$.ajax({
url: createURL(
'updateConfiguration&name=' + name + '&value=' + value
),
dataType: 'json',
async: true,
url: createURL('updateConfiguration'),
data: data,
success: function(json) {
var item = json.updateconfigurationresponse.configuration;
if(item.category == "Usage")
@ -95,22 +93,12 @@
maxguestslimit: { label: 'label.max.guest.limit' }
},
dataProvider: function(args) {
var array1 = [];
if(args.filterBy != null) {
if(args.filterBy.search != null && args.filterBy.search.by != null && args.filterBy.search.value != null) {
switch(args.filterBy.search.by) {
case "name":
if(args.filterBy.search.value.length > 0)
array1.push("&keyword=" + args.filterBy.search.value);
break;
}
}
}
var data = {};
listViewDataProvider(args, data);
$.ajax({
url: createURL("listHypervisorCapabilities&page=" + args.page + "&pagesize=" + pageSize + array1.join("")),
dataType: "json",
async: true,
url: createURL('listHypervisorCapabilities'),
data: data,
success: function(json) {
var items = json.listhypervisorcapabilitiesresponse.hypervisorCapabilities;
args.response.success({data:items});
@ -127,11 +115,14 @@
edit: {
label: 'label.edit',
action: function(args) {
var array1 = [];
array1.push("&maxguestslimit=" + todb(args.data.maxguestslimit));
var data = {
id: args.context.hypervisorCapabilities[0].id,
maxguestslimit: args.data.maxguestslimit
};
$.ajax({
url: createURL("updateHypervisorCapabilities&id=" + args.context.hypervisorCapabilities[0].id + array1.join("")),
dataType: "json",
url: createURL('updateHypervisorCapabilities'),
data: data,
success: function(json) {
var item = json.updatehypervisorcapabilitiesresponse['null'];
args.response.success({data: item});

View File

@ -467,9 +467,17 @@
//create new network starts here
if(args.data["new-network"] == "create-new-network") {
var isCreateNetworkSuccessful = true;
var data = {
networkOfferingId: args.data["new-network-networkofferingid"],
name: args.data["new-network-name"],
displayText: args.data["new-network-name"],
zoneId: selectedZoneObj.id
};
$.ajax({
url: createURL("createNetwork&networkOfferingId="+args.data["new-network-networkofferingid"]+"&name="+todb(args.data["new-network-name"])+"&displayText="+todb(args.data["new-network-name"])+"&zoneId="+selectedZoneObj.id),
dataType: "json",
url: createURL('createNetwork'),
data: data,
async: false,
success: function(json) {
newNetwork = json.createnetworkresponse.network;

View File

@ -762,6 +762,7 @@
tabFilter: function(args) {
var networkOfferingHavingELB = false;
var hasNetworkACL = false;
var isVPC = false;
$.ajax({
url: createURL("listNetworkOfferings&id=" + args.context.networks[0].networkofferingid),
@ -770,6 +771,10 @@
success: function(json) {
var networkoffering = json.listnetworkofferingsresponse.networkoffering[0];
if (networkoffering.forvpc) {
isVPC = true;
}
$(networkoffering.service).each(function(){
var thisService = this;
@ -786,13 +791,13 @@
}
});
var hiddenTabs = [];
var hiddenTabs = ['egressRules']; // Disable egress UI, for now
if (!networkOfferingHavingELB) {
hiddenTabs.push("addloadBalancer");
}
if (!hasNetworkACL) {
if (!hasNetworkACL || isVPC) {
hiddenTabs.push('egressRules');
}
@ -956,7 +961,8 @@
dataType: "json",
async: true,
success: function(json) {
var jsonObj = json.listnetworksresponse.network[0];
var jsonObj = json.listnetworksresponse.network[0];
addExtraPropertiesToGuestNetworkObject(jsonObj);
args.response.success(
{
actionFilter: cloudStack.actionFilter.guestNetwork,
@ -3072,10 +3078,9 @@
$.ajax({
url: createURL('removeVpnUser'),
data: {
username: args.context.multiRule[0].username,
id: args.context.multiRule[0].domainid,
account: args.context.ipAddresses[0].account,
domainid: args.context.ipAddresses[0].domainid
domainid: args.context.multiRule[0].domainid,
account: args.context.multiRule[0].account,
username: args.context.multiRule[0].username
},
dataType: 'json',
async: true,
@ -3414,174 +3419,187 @@
}
})
},
egressRules: {
title: 'label.egress.rule',
custom: cloudStack.uiCustom.securityRules({
noSelect: true,
noHeaderActionsColumn: true,
fields: {
'protocol': {
label: 'label.protocol',
select: function(args) {
args.$select.change(function() {
var $inputs = args.$form.find('th, td');
var $icmpFields = $inputs.filter(function() {
var name = $(this).attr('rel');
custom: function(args) {
var context = args.context;
return $.inArray(name, [
'icmptype',
'icmpcode'
]) > -1;
});
var $otherFields = $inputs.filter(function() {
var name = $(this).attr('rel');
return $('<div>').multiEdit({
context: context,
noSelect: true,
noHeaderActionsColumn: true,
fields: {
'cidrlist': { edit: true, label: 'label.cidr' },
'protocol': {
label: 'label.protocol',
select: function(args) {
args.$select.change(function() {
var $inputs = args.$form.find('th, td');
var $icmpFields = $inputs.filter(function() {
var name = $(this).attr('rel');
return name != 'icmptype' &&
name != 'icmpcode' &&
name != 'protocol' &&
name != 'add-rule' &&
name != 'cidr' &&
name != 'accountname' &&
name != 'securitygroup';
return $.inArray(name, [
'icmptype',
'icmpcode'
]) > -1;
});
var $otherFields = $inputs.filter(function() {
var name = $(this).attr('rel');
return name != 'cidrlist' &&
name != 'icmptype' &&
name != 'icmpcode' &&
name != 'protocol' &&
name != 'add-rule';
});
if ($(this).val() == 'icmp') {
$icmpFields.show();
$otherFields.hide();
} else {
$icmpFields.hide();
$otherFields.show();
}
});
if ($(this).val() == 'icmp') {
$icmpFields.show();
$otherFields.hide();
} else {
$icmpFields.hide();
$otherFields.show();
args.response.success({
data: [
{ name: 'tcp', description: 'TCP' },
{ name: 'udp', description: 'UDP' },
{ name: 'icmp', description: 'ICMP' }
]
});
}
},
'startport': { edit: true, label: 'label.start.port' },
'endport': { edit: true, label: 'label.end.port' },
'icmptype': { edit: true, label: 'ICMP.type', isHidden: true },
'icmpcode': { edit: true, label: 'ICMP.code', isHidden: true },
'add-rule': {
label: 'label.add',
addButton: true
}
},
add: {
label: 'label.add',
action: function(args) {
var data = {
protocol: args.data.protocol,
cidrlist: args.data.cidrlist,
trafficType: 'Egress'
};
if (args.data.icmptype && args.data.icmpcode) { // ICMP
$.extend(data, {
icmptype: args.data.icmptype,
icmpcode: args.data.icmpcode
});
} else { // TCP/UDP
$.extend(data, {
startport: args.data.startport,
endport: args.data.endport
});
}
// Get Source NAT IP
var sourceNATIP;
$.ajax({
url: createURL('listPublicIpAddresses'),
data: {
listAll: true,
associatednetworkid: args.context.networks[0].id
},
async: false,
success: function(json) {
var ipAddresses = json.listpublicipaddressesresponse.publicipaddress;
sourceNATIP = $.grep(ipAddresses, function(ipAddress) {
return ipAddress.issourcenat;
})[0];
}
});
args.response.success({
data: [
{ name: 'tcp', description: 'TCP' },
{ name: 'udp', description: 'UDP' },
{ name: 'icmp', description: 'ICMP' }
]
data.ipaddressid = sourceNATIP.id;
$.ajax({
url: createURL('createFirewallRule'),
data: data,
dataType: 'json',
async: true,
success: function(json) {
var jobId = json.createfirewallruleresponse.jobid;
args.response.success({
_custom: {
jobId: jobId
},
notification: {
label: 'label.add.egress.rule',
poll: pollAsyncJobResult
}
});
},
error: function(json) {
args.response.error(parseXMLHttpResponse(json));
}
});
}
},
'startport': { edit: true, label: 'label.start.port' },
'endport': { edit: true, label: 'label.end.port' },
'icmptype': { edit: true, label: 'ICMP.type', isHidden: true },
'icmpcode': { edit: true, label: 'ICMP.code', isHidden: true },
'cidr': { edit: true, label: 'label.cidr', isHidden: true },
'accountname': {
edit: true,
label: 'label.account.and.security.group',
isHidden: true,
range: ['accountname', 'securitygroup']
actions: {
destroy: {
label: 'label.remove.rule',
action: function(args) {
$.ajax({
url: createURL('deleteFirewallRule'),
data: {
id: args.context.multiRule[0].id
},
dataType: 'json',
async: true,
success: function(data) {
var jobID = data.deletefirewallruleresponse.jobid;
args.response.success({
_custom: {
jobId: jobID
},
notification: {
label: 'label.remove.egress.rule',
poll: pollAsyncJobResult
}
});
},
error: function(json) {
args.response.error(parseXMLHttpResponse(json));
}
});
}
}
},
'add-rule': {
label: 'label.add',
addButton: true
}
},
add: {
label: 'label.add',
action: function(args) {
var data = {
securitygroupid: args.context.securityGroups[0].id,
protocol: args.data.protocol,
domainid: args.context.securityGroups[0].domainid,
account: args.context.securityGroups[0].account
};
// TCP / ICMP
if (args.data.icmptype && args.data.icmpcode) { // ICMP
$.extend(data, {
icmptype: args.data.icmptype,
icmpcode: args.data.icmpcode
});
} else { // TCP
$.extend(data, {
startport: args.data.startport,
endport: args.data.endport
});
}
// CIDR / account
if (args.data.cidr) {
data.cidrlist = args.data.cidr;
} else {
data['usersecuritygrouplist[0].account'] = args.data.accountname;
data['usersecuritygrouplist[0].group'] = args.data.securitygroup;
}
ignoreEmptyFields: true,
dataProvider: function(args) {
$.ajax({
url: createURL('authorizeSecurityGroupEgress'),
data: data,
url: createURL('listFirewallRules'),
data: {
listAll: true,
networkid: args.context.networks[0].id,
trafficType: 'Egress'
},
dataType: 'json',
async: true,
success: function(data) {
var jobId = data.authorizesecuritygroupegressresponse.jobid;
success: function(json) {
var response = json.listfirewallrulesresponse.firewallrule;
args.response.success({
_custom: {
jobId: jobId
},
notification: {
label: 'label.add.egress.rule',
poll: pollAsyncJobResult
}
data: response
});
}
});
}
},
actions: {
destroy: {
label: 'label.remove.rule',
action: function(args) {
$.ajax({
url: createURL('revokeSecurityGroupEgress'),
data: {
domainid: args.context.securityGroups[0].domainid,
account: args.context.securityGroups[0].account,
id: args.context.multiRule[0].id
},
dataType: 'json',
async: true,
success: function(data) {
var jobID = data.revokesecuritygroupegress.jobid;
args.response.success({
_custom: {
jobId: jobID
},
notification: {
label: 'label.remove.egress.rule',
poll: pollAsyncJobResult
}
});
}
});
}
}
},
ignoreEmptyFields: true,
dataProvider: function(args) {
$.ajax({
url: createURL('listSecurityGroups'),
data: {
id: args.context.securityGroups[0].id
},
dataType: 'json',
async: true,
success: function(data) {
args.response.success({
data: $.map(
data.listsecuritygroupsresponse.securitygroup[0].egressrule ?
data.listsecuritygroupsresponse.securitygroup[0].egressrule : [],
ingressEgressDataMap
)
});
}
});
}
})
});
}
}
},
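
The block above replaces the old security-group style egress UI with a multiEdit grid driven by the firewall APIs: rules are listed with listFirewallRules (trafficType 'Egress'), created with createFirewallRule against the network's source NAT IP, and removed with deleteFirewallRule. The source NAT lookup is the non-obvious step, so here it is in isolation; findSourceNatIp is a hypothetical helper name, while the API names and response shapes come from the hunk above:

    // Hedged sketch of the source NAT lookup used when adding an egress rule.
    function findSourceNatIp(networkId, callback) {
        $.ajax({
            url: createURL('listPublicIpAddresses'),
            data: { listAll: true, associatednetworkid: networkId },
            dataType: 'json',
            success: function(json) {
                var ipAddresses = json.listpublicipaddressesresponse.publicipaddress;
                // The source NAT IP is the one flagged issourcenat.
                callback($.grep(ipAddresses, function(ip) { return ip.issourcenat; })[0]);
            }
        });
    }

    // Usage: attach a new egress rule to the network's source NAT IP.
    findSourceNatIp(args.context.networks[0].id, function(sourceNatIp) {
        $.ajax({
            url: createURL('createFirewallRule'),
            data: {
                ipaddressid: sourceNatIp.id,
                protocol: 'tcp',          // illustrative values
                startport: 80,
                endport: 80,
                cidrlist: '0.0.0.0/0',
                trafficType: 'Egress'
            },
            dataType: 'json',
            success: function(json) {
                // poll json.createfirewallruleresponse.jobid, as the grid code above does
            }
        });
    });
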
@ -4030,52 +4048,51 @@
});
}
},
router: {
title: 'VPC Router Details',
fields:[
{
name: {label:'label.name'}
},
{
id:{ label:'label.id'},
zonename: { label: 'label.zone'},
dns1: {label: 'label.dns'},
gateway: {label:'label.gateway'},
publicip: {label: 'label.public.ip'},
guestipaddress:{ label: 'label.guest.ip'},
linklocalip: {label: 'label.linklocal.ip'},
state: { label:'label.state'},
serviceofferingname: {label:'label.service.offering'},
isredundantrouter:{
label: 'label.redundant.router',
converter: function(booleanValue) {
if (booleanValue == true) {
return "<font color='red'>Yes</font>";
}
return "No";
}
},
account: {label:'label.account'},
domain: {label: 'label.domain'}
router: {
title: 'VPC Router Details',
fields:[
{
name: {label:'label.name'}
},
{
id:{ label:'label.id'},
zonename: { label: 'label.zone'},
dns1: {label: 'label.dns'},
gateway: {label:'label.gateway'},
publicip: {label: 'label.public.ip'},
guestipaddress:{ label: 'label.guest.ip'},
linklocalip: {label: 'label.linklocal.ip'},
state: { label:'label.state'},
serviceofferingname: {label:'label.service.offering'},
isredundantrouter:{
label: 'label.redundant.router',
converter: function(booleanValue) {
if (booleanValue == true) {
return "<font color='red'>Yes</font>";
}
],
dataProvider: function(args) {
$.ajax ({
url:createURL("listRouters&listAll=true&vpcid=" +args.context.vpc[0].id),
dataType: "json",
async: true,
success:function(json) {
var item = json.listroutersresponse.router[0];
args.response.success ({
data:item
})
}
});
}
return "No";
}
},
account: {label:'label.account'},
domain: {label: 'label.domain'}
}
],
dataProvider: function(args) {
$.ajax({
url: createURL("listRouters&listAll=true&vpcid=" +args.context.vpc[0].id),
dataType: "json",
async: true,
success: function(json) {
var item = json.listroutersresponse.router[0];
args.response.success({
actionFilter: cloudStack.sections.system.routerActionFilter,
data:item
});
}
});
}
}
}
}
}

View File

@ -569,13 +569,18 @@
// Project listing data provider
dataProvider: function(args) {
var user = args.context.users[0];
var data = {
accountId: user.userid,
listAll: true
};
if (args.projectName) {
data.keyword = args.projectName;
}
$.ajax({
url: createURL('listProjects', { ignoreProject: true }),
data: {
accountId: user.userid,
listAll: true
},
data: data,
dataType: 'json',
async: true,
success: function(data) {

View File

@ -37,8 +37,8 @@ var ERROR_INTERNET_CANNOT_CONNECT = 12029;
var ERROR_VMOPS_ACCOUNT_ERROR = 531;
// Default password is MD5 hashed. Set the following variable to false to disable this.
var md5Hashed = true;
var md5HashedLogin = true;
var md5Hashed = false;
var md5HashedLogin = false;
//page size for API call (e.g."listXXXXXXX&pagesize=N" )
var pageSize = 20;
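
Flipping md5Hashed and md5HashedLogin to false makes the UI submit passwords as typed instead of MD5-hashing them client-side first. The login handler itself is outside this diff, so the following is only a hedged sketch of how such a flag is commonly consulted, assuming a jQuery md5 helper ($.md5) is available:

    // Hedged sketch only; not the actual login code in this commit.
    var password = args.data.password;
    if (md5HashedLogin) {
        password = $.md5(password); // client-side hash before the login request
    }
    // password is then sent with the login API call.
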
@ -454,6 +454,29 @@ function listViewDataProvider(args, data) {
});
}
//used by the infrastructure page and the network page
var addExtraPropertiesToGuestNetworkObject = function(jsonObj) {
jsonObj.networkdomaintext = jsonObj.networkdomain;
jsonObj.networkofferingidText = jsonObj.networkofferingid;
if(jsonObj.acltype == "Domain") {
if(jsonObj.domainid == rootAccountId)
jsonObj.scope = "All";
else
jsonObj.scope = "Domain (" + jsonObj.domain + ")";
}
else if (jsonObj.acltype == "Account"){
if(jsonObj.project != null)
jsonObj.scope = "Account (" + jsonObj.domain + ", " + jsonObj.project + ")";
else
jsonObj.scope = "Account (" + jsonObj.domain + ", " + jsonObj.account + ")";
}
if(jsonObj.vlan == null && jsonObj.broadcasturi != null) {
jsonObj.vlan = jsonObj.broadcasturi.replace("vlan://", "");
}
}
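
addExtraPropertiesToGuestNetworkObject is now defined once here in the shared helpers (the duplicate that lived in the system section is removed later in this commit), so the infrastructure page and the network page derive the same scope, vlan and display fields. A hedged usage sketch, as it would appear inside a list view dataProvider; the listNetworks response shape matches what the network section uses:

    $.ajax({
        url: createURL('listNetworks'),
        data: { listAll: true },
        dataType: 'json',
        success: function(json) {
            var networks = json.listnetworksresponse.network || [];
            $.each(networks, function(index, network) {
                // adds scope, vlan, networkdomaintext and networkofferingidText
                addExtraPropertiesToGuestNetworkObject(network);
            });
            args.response.success({ data: networks });
        }
    });
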
//find service object in network object
function ipFindNetworkServiceByName(pName, networkObj) {
if(networkObj == null)

View File

@ -194,6 +194,10 @@
podCount: function(data) {
$.ajax({
url: createURL('listPods'),
data: {
page: 1,
pagesize: 1 //specifying pagesize as 1 because we don't need any embedded objects returned here; the only thing we need from the API response is the "count" property.
},
success: function(json) {
dataFns.clusterCount($.extend(data, {
podCount: json.listpodsresponse.count ?
@ -206,11 +210,24 @@
clusterCount: function(data) {
$.ajax({
url: createURL('listClusters'),
success: function(json) {
dataFns.hostCount($.extend(data, {
data: {
page: 1,
pagesize: 1 //specifying pagesize as 1 because we don't need any embedded objects returned here; the only thing we need from the API response is the "count" property.
},
success: function(json) {
dataFns.hostCount($.extend(data, {
clusterCount: json.listclustersresponse.count ?
json.listclustersresponse.count : 0
}));
//comment out the 4 lines above and uncomment the following 4 lines if the listHosts API still responds slowly.
/*
dataFns.primaryStorageCount($.extend(data, {
clusterCount: json.listclustersresponse.count ?
json.listclustersresponse.count : 0
}));
*/
}
});
},
@ -219,7 +236,9 @@
$.ajax({
url: createURL('listHosts'),
data: {
type: 'routing'
type: 'routing',
page: 1,
pagesize: 1 //specifying pagesize as 1 because we don't need any embedded objects returned here; the only thing we need from the API response is the "count" property.
},
success: function(json) {
dataFns.primaryStorageCount($.extend(data, {
@ -233,11 +252,24 @@
primaryStorageCount: function(data) {
$.ajax({
url: createURL('listStoragePools'),
success: function(json) {
dataFns.secondaryStorageCount($.extend(data, {
data: {
page: 1,
pagesize: 1 //specifying pagesize as 1 because we don't need any embedded objects returned here; the only thing we need from the API response is the "count" property.
},
success: function(json) {
dataFns.secondaryStorageCount($.extend(data, {
primaryStorageCount: json.liststoragepoolsresponse.count ?
json.liststoragepoolsresponse.count : 0
}));
//comment out the 4 lines above and uncomment the following 4 lines if the listHosts API still responds slowly.
/*
dataFns.systemVmCount($.extend(data, {
primaryStorageCount: json.liststoragepoolsresponse.count ?
json.liststoragepoolsresponse.count : 0
}));
*/
}
});
},
@ -246,7 +278,9 @@
$.ajax({
url: createURL('listHosts'),
data: {
type: 'SecondaryStorage'
type: 'SecondaryStorage',
page: 1,
pagesize: 1 //specifying pagesize as 1 because we don't need any embedded objects returned here; the only thing we need from the API response is the "count" property.
},
success: function(json) {
dataFns.systemVmCount($.extend(data, {
@ -260,6 +294,10 @@
systemVmCount: function(data) {
$.ajax({
url: createURL('listSystemVms'),
data: {
page: 1,
pagesize: 1 //specifying pagesize as 1 because we don't need any embedded objects returned here; the only thing we need from the API response is the "count" property.
},
success: function(json) {
dataFns.virtualRouterCount($.extend(data, {
systemVmCount: json.listsystemvmsresponse.count ?
@ -273,14 +311,18 @@
$.ajax({
url: createURL('listRouters'),
data: {
projectid: -1
projectid: -1,
page: 1,
pagesize: 1 //specifying pagesize as 1 because we don't need any embedded objects returned here; the only thing we need from the API response is the "count" property.
},
success: function(json) {
var total1 = json.listroutersresponse.count ? json.listroutersresponse.count : 0;
$.ajax({
url: createURL('listRouters'),
data: {
listAll: true
listAll: true,
page: 1,
pagesize: 1 //specifying pagesize as 1 because we don't need any embedded objects returned here; the only thing we need from the API response is the "count" property.
},
success: function(json) {
var total2 = json.listroutersresponse.count ? json.listroutersresponse.count : 0;
@ -336,13 +378,10 @@
data: data
});
};
// re: CS-16413 -- Disable API calls
return args.response.success({
data: {}
});
dataFns.zoneCount({});
//dataFns.zoneCount({});
dataFns.podCount({}); //uncomment the line above and remove this line once "count" in the listZones API response is fixed.
}
},
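
Every dashboard counter above now requests page 1 with a page size of 1: the response still carries the total in its count field, but the server no longer serializes the pod, cluster, host, storage and router objects the dashboard never displays. The same idea as a small generic helper; countOf is a hypothetical name, and the response keys follow the listXxxresponse convention visible in the calls above:

    // Hedged sketch: fetch only the "count" field of a list API.
    function countOf(command, responseKey, extraParams, callback) {
        $.ajax({
            url: createURL(command),
            data: $.extend({ page: 1, pagesize: 1 }, extraParams || {}),
            dataType: 'json',
            success: function(json) {
                var response = json[responseKey];
                callback(response && response.count ? response.count : 0);
            }
        });
    }

    // Usage, mirroring the routing-host counter above:
    countOf('listHosts', 'listhostsresponse', { type: 'routing' }, function(total) {
        // total number of routing hosts, with no host objects on the wire
    });
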
@ -10314,7 +10353,7 @@
return allowedActions;
}
var routerActionfilter = function(args) {
var routerActionfilter = cloudStack.sections.system.routerActionFilter = function(args) {
var jsonObj = args.context.item;
var allowedActions = [];
@ -10391,28 +10430,6 @@
}
}
var addExtraPropertiesToGuestNetworkObject = function(jsonObj) {
jsonObj.networkdomaintext = jsonObj.networkdomain;
jsonObj.networkofferingidText = jsonObj.networkofferingid;
if(jsonObj.acltype == "Domain") {
if(jsonObj.domainid == rootAccountId)
jsonObj.scope = "All";
else
jsonObj.scope = "Domain (" + jsonObj.domain + ")";
}
else if (jsonObj.acltype == "Account"){
if(jsonObj.project != null)
jsonObj.scope = "Account (" + jsonObj.domain + ", " + jsonObj.project + ")";
else
jsonObj.scope = "Account (" + jsonObj.domain + ", " + jsonObj.account + ")";
}
if(jsonObj.vlan == null && jsonObj.broadcasturi != null) {
jsonObj.vlan = jsonObj.broadcasturi.replace("vlan://", "");
}
}
var addExtraPropertiesToRouterInstanceObject = function(jsonObj) {
if(jsonObj.isredundantrouter == true)
jsonObj["redundantRouterState"] = jsonObj.redundantstate;

View File

@ -1428,7 +1428,7 @@
// "Delete Template"
//if (((isUser() && jsonObj.ispublic == true && !(jsonObj.domainid == g_domainid && jsonObj.account == g_account)))
if (((isAdmin() == false && !(jsonObj.domainid == g_domainid && jsonObj.account == g_account))) //if neither root-admin, nor item owner
if (((isAdmin() == false && !(jsonObj.domainid == g_domainid && jsonObj.account == g_account) && !(jsonObj.domainid == g_domainid && jsonObj.projectid == cloudStack.context.projects[0].id))) //if neither root-admin, nor the same account, nor the same project
|| (jsonObj.isready == false && jsonObj.status != null && jsonObj.status.indexOf("Downloaded") != -1)
|| (jsonObj.account == "system")) {
//do nothing
@ -1487,7 +1487,7 @@
// "Delete ISO"
//if (((isUser() && jsonObj.ispublic == true && !(jsonObj.domainid == g_domainid && jsonObj.account == g_account)))
if (((isAdmin() == false && !(jsonObj.domainid == g_domainid && jsonObj.account == g_account))) //if neither root-admin, nor item owner
if (((isAdmin() == false && !(jsonObj.domainid == g_domainid && jsonObj.account == g_account) && !(jsonObj.domainid == g_domainid && jsonObj.projectid == cloudStack.context.projects[0].id))) //if neither root-admin, nor the same account, nor the same project
|| (jsonObj.isready == false && jsonObj.status != null && jsonObj.status.indexOf("Downloaded") != -1)
|| (jsonObj.account == "system")
) {
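
Both conditions above now also admit project members: an item in the caller's domain whose projectid matches the current project is treated like an owned item. The nested boolean is easier to audit spelled out; canDelete below is a hypothetical restatement of the same guard, not a function that exists in the codebase:

    // Hedged restatement of the delete-visibility check above.
    function canDelete(jsonObj) {
        var isOwner = jsonObj.domainid == g_domainid && jsonObj.account == g_account;
        var isSameProject = jsonObj.domainid == g_domainid &&
            jsonObj.projectid == cloudStack.context.projects[0].id;
        var notReadyDownload = jsonObj.isready == false &&
            jsonObj.status != null && jsonObj.status.indexOf("Downloaded") != -1;
        if (isAdmin() == false && !isOwner && !isSameProject) return false; // neither admin, owner nor project member
        if (notReadyDownload) return false;            // download state not usable yet
        if (jsonObj.account == "system") return false; // system items are never deletable
        return true;
    }
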

View File

@ -137,8 +137,8 @@
)
.append(
$('<div>').addClass('select-desc')
.append($('<div>').addClass('name').html(this[fields.name]))
.append($('<div>').addClass('desc').html(this[fields.desc]))
.append($('<div>').addClass('name').html(_s(this[fields.name])))
.append($('<div>').addClass('desc').html(_s(this[fields.desc])))
)
.data('json-obj', this);
@ -876,6 +876,12 @@
}
});
$wizard.find('div.data-disk-offering div.custom-size input[type=text]').bind('change',function() {
var old = $wizard.find('div.data-disk-offering div.custom-size input[type=text]').val();
$wizard.find('div.data-disk-offering span.custom-disk-size').html(_s(old));
});
return $wizard.dialog({
title: _l('label.vm.add'),
width: 800,
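
Both changes above route user-controlled text through _s() before injecting it with .html(): the select-description name and description fields, and the custom disk size echoed back into the wizard. A minimal sketch of why that matters, assuming _s() is the UI's HTML-escaping helper (it is used that way throughout this file) and $span is a hypothetical target element:

    var userValue = '<img src=x onerror=alert(1)>'; // illustrative hostile input
    $span.html(userValue);      // interpreted as markup
    $span.html(_s(userValue));  // escaped first, rendered as literal text
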

View File

@ -543,8 +543,9 @@
var $cancel = $('<div>').addClass('button cancel').html(_l('label.cancel'));
// Get project data
var loadData = function(complete) {
var loadData = function(complete, options) {
cloudStack.projects.dataProvider({
projectName: options ? options.projectName : null,
context: cloudStack.context,
response: {
success: function(args) {
@ -585,7 +586,9 @@
// Search form
$searchForm.submit(function() {
$list.find('li').remove();
loadData();
loadData(null, {
projectName: $(this).find('input[type=text]').val()
});
return false;
});
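
Together with the listProjects dataProvider change earlier in this commit (the optional keyword parameter), the hunk above wires the project dialog's search box through loadData. A condensed sketch of the round trip, with the intermediate plumbing elided:

    // Dialog side: pass the typed name along when the search form is submitted.
    $searchForm.submit(function() {
        loadData(null, { projectName: $(this).find('input[type=text]').val() });
        return false;
    });

    // Provider side: the optional name becomes a keyword filter on listProjects.
    var data = { accountId: user.userid, listAll: true };
    if (args.projectName) {
        data.keyword = args.projectName;
    }
    $.ajax({ url: createURL('listProjects', { ignoreProject: true }), data: data, dataType: 'json' });
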

View File

@ -349,6 +349,11 @@
'cloudBrowser',
{
'breadcrumb': function($target, $browser, data) {
if ($('#browser').hasClass('panel-highlight')) {
return false;
}
$browser.cloudBrowser('selectPanel', { panel: data.panel });
}
}

View File

@ -251,8 +251,27 @@
routerDetailView: function() {
return {
title: 'VPC router details',
updateContext: function(args) {
var router;
$.ajax({
url: createURL("listRouters&listAll=true&vpcid=" +args.context.vpc[0].id),
dataType: "json",
async: false,
success: function(json) {
router = json.listroutersresponse.router[0];
}
});
return {
routers: [router]
};
},
actions: cloudStack.sections.system.subsections.virtualRouters
.listView.detailView.actions,
tabs: {
routerDetails: cloudStack.sections.network.sections.vpc.listView.detailView.tabs.router
routerDetails: cloudStack.sections.network.sections.vpc
.listView.detailView.tabs.router
}
};
},
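
updateContext is what lets the VPC router detail view above reuse the actions and tab written for the system section's virtual routers: it fetches the VPC's router synchronously and returns it as routers, which is merged into args.context in the shape those reused pieces expect. A hedged sketch of the pattern:

    // Whatever updateContext returns is merged into args.context before
    // the detail view's tabs and actions run.
    var vpcRouterDetailView = {
        title: 'VPC router details',
        updateContext: function(args) {
            var router;
            $.ajax({
                url: createURL('listRouters'),
                data: { listAll: true, vpcid: args.context.vpc[0].id },
                dataType: 'json',
                async: false, // context must be complete before anything renders
                success: function(json) {
                    router = json.listroutersresponse.router[0];
                }
            });
            return { routers: [router] }; // reused router actions read context.routers[0]
        }
        // actions and tabs borrowed from the system section then work unchanged
    };
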
@ -1595,7 +1614,8 @@
}
});
var hiddenTabs = [];
var hiddenTabs = ['ipAddresses']; // Disable the IP address tab; it is redundant with the 'view all' button
if(networkOfferingHavingELB == false)
hiddenTabs.push("addloadBalancer");
return hiddenTabs;

View File

@ -161,6 +161,7 @@
<configuration>
<excludes>
<exclude>com/cloud/utils/testcase/*TestCase*</exclude>
<exclude>com/cloud/utils/db/*Test*</exclude>
</excludes>
</configuration>
</plugin>