diff --git a/agent/pom.xml b/agent/pom.xml
index a3d071b1c6a..aceb4d71dd3 100644
--- a/agent/pom.xml
+++ b/agent/pom.xml
@@ -83,7 +83,7 @@
+ value="${cs.replace.properties}" />
@@ -97,7 +97,7 @@
+ value="${cs.replace.properties}" />
@@ -106,6 +106,24 @@
+        <plugin>
+          <groupId>org.apache.maven.plugins</groupId>
+          <artifactId>maven-dependency-plugin</artifactId>
+          <version>2.5.1</version>
+          <executions>
+            <execution>
+              <id>copy-dependencies</id>
+              <phase>package</phase>
+              <goals>
+                <goal>copy-dependencies</goal>
+              </goals>
+              <configuration>
+                <outputDirectory>${project.build.directory}/dependencies</outputDirectory>
+                <includeScope>runtime</includeScope>
+              </configuration>
+            </execution>
+          </executions>
+        </plugin>
diff --git a/api/src/com/cloud/agent/api/to/VirtualMachineTO.java b/api/src/com/cloud/agent/api/to/VirtualMachineTO.java
index bdd636e727b..b84d20a9239 100644
--- a/api/src/com/cloud/agent/api/to/VirtualMachineTO.java
+++ b/api/src/com/cloud/agent/api/to/VirtualMachineTO.java
@@ -28,8 +28,20 @@ public class VirtualMachineTO {
private BootloaderType bootloader;
Type type;
int cpus;
+
+    /**
+     * 'speed' is still here because 4.0.X/4.1.X management servers do not
+     * support the overcommit feature yet; overcommit sends minSpeed and
+     * maxSpeed instead. Keeping 'speed' preserves backwards compatibility
+     * between 4.0.X/4.1.X management servers and newer agents.
+     */
+ Integer speed;
Integer minSpeed;
Integer maxSpeed;
+
long minRam;
long maxRam;
String hostName;
@@ -48,6 +60,21 @@ public class VirtualMachineTO {
VolumeTO[] disks;
NicTO[] nics;
+ public VirtualMachineTO(long id, String instanceName, VirtualMachine.Type type, int cpus, Integer speed, long minRam, long maxRam, BootloaderType bootloader, String os, boolean enableHA, boolean limitCpuUse, String vncPassword) {
+ this.id = id;
+ this.name = instanceName;
+ this.type = type;
+ this.cpus = cpus;
+ this.speed = speed;
+ this.minRam = minRam;
+ this.maxRam = maxRam;
+ this.bootloader = bootloader;
+ this.os = os;
+ this.enableHA = enableHA;
+ this.limitCpuUse = limitCpuUse;
+ this.vncPassword = vncPassword;
+ }
+
public VirtualMachineTO(long id, String instanceName, VirtualMachine.Type type, int cpus, Integer minSpeed, Integer maxSpeed, long minRam, long maxRam, BootloaderType bootloader, String os, boolean enableHA, boolean limitCpuUse, String vncPassword) {
this.id = id;
this.name = instanceName;
@@ -103,6 +130,10 @@ public class VirtualMachineTO {
this.cpus = cpus;
}
+ public Integer getSpeed() {
+ return speed;
+ }
+
public Integer getMinSpeed() {
return minSpeed;
}
diff --git a/api/src/com/cloud/network/TrafficLabel.java b/api/src/com/cloud/network/TrafficLabel.java
new file mode 100644
index 00000000000..782df14b7dc
--- /dev/null
+++ b/api/src/com/cloud/network/TrafficLabel.java
@@ -0,0 +1,36 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.network;
+
+import com.cloud.network.Networks.TrafficType;
+
+/* While configuring a zone, a user can provide a label that names the
+ * physical network to be used for a traffic type defined by CloudStack
+ * (see the TrafficType enum). This label is called a traffic label. It may
+ * encapsulate physical-network-specific properties such as a VLAN ID or the
+ * name of a virtual network object. That name depends on the hypervisor:
+ * for example, it is the name of a bridge on XenServer and the name of a
+ * virtual switch on VMware.
+ */
+public interface TrafficLabel {
+
+ public TrafficType getTrafficType();
+
+ public String getNetworkLabel();
+
+}
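A minimal sketch of what an implementation of this interface could look like; the class
name SimpleTrafficLabel and its constructor are illustrative, not part of this patch.

    package com.cloud.network;

    import com.cloud.network.Networks.TrafficType;

    // Trivial value-object implementation of TrafficLabel.
    public class SimpleTrafficLabel implements TrafficLabel {
        private final TrafficType trafficType;
        private final String networkLabel; // e.g. a XenServer bridge or VMware vSwitch name

        public SimpleTrafficLabel(TrafficType trafficType, String networkLabel) {
            this.trafficType = trafficType;
            this.networkLabel = networkLabel;
        }

        @Override
        public TrafficType getTrafficType() {
            return trafficType;
        }

        @Override
        public String getNetworkLabel() {
            return networkLabel;
        }
    }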
diff --git a/api/src/org/apache/cloudstack/api/ApiConstants.java b/api/src/org/apache/cloudstack/api/ApiConstants.java
index 2a09de8d3d8..a26b4683a3a 100755
--- a/api/src/org/apache/cloudstack/api/ApiConstants.java
+++ b/api/src/org/apache/cloudstack/api/ApiConstants.java
@@ -360,7 +360,10 @@ public class ApiConstants {
public static final String CUSTOM_DISK_OFF_MAX_SIZE = "customdiskofferingmaxsize";
public static final String DEFAULT_ZONE_ID = "defaultzoneid";
public static final String GUID = "guid";
-
+ public static final String VSWITCH_TYPE_GUEST_TRAFFIC = "guestvswitchtype";
+ public static final String VSWITCH_TYPE_PUBLIC_TRAFFIC = "publicvswitchtype";
+ public static final String VSWITCH_NAME_GUEST_TRAFFIC = "guestvswitchname";
+ public static final String VSWITCH_NAME_PUBLIC_TRAFFIC = "publicvswitchname";
public static final String EXTERNAL_SWITCH_MGMT_DEVICE_ID = "vsmdeviceid";
public static final String EXTERNAL_SWITCH_MGMT_DEVICE_NAME = "vsmdevicename";
public static final String EXTERNAL_SWITCH_MGMT_DEVICE_STATE = "vsmdevicestate";
diff --git a/api/src/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java b/api/src/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java
index 7b1cd067eb1..d55ccd7dd11 100644
--- a/api/src/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java
+++ b/api/src/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java
@@ -91,6 +91,34 @@ public class AddClusterCmd extends BaseCmd {
@Parameter(name = ApiConstants.MEMORY_OVERCOMMIT_RATIO, type = CommandType.STRING, required = false ,description = "value of the default ram overcommit ratio, defaults to 1")
private String memoryovercommitratio;
+    @Parameter(name = ApiConstants.VSWITCH_TYPE_GUEST_TRAFFIC, type = CommandType.STRING, required = false, description = "Type of virtual switch used for guest traffic in the cluster. Allowed values are vmwaresvs (for VMware standard vSwitch) and vmwaredvs (for VMware distributed vSwitch)")
+    private String vSwitchTypeGuestTraffic;
+
+    @Parameter(name = ApiConstants.VSWITCH_TYPE_PUBLIC_TRAFFIC, type = CommandType.STRING, required = false, description = "Type of virtual switch used for public traffic in the cluster. Allowed values are vmwaresvs (for VMware standard vSwitch) and vmwaredvs (for VMware distributed vSwitch)")
+    private String vSwitchTypePublicTraffic;
+
+    @Parameter(name = ApiConstants.VSWITCH_NAME_GUEST_TRAFFIC, type = CommandType.STRING, required = false, description = "Name of virtual switch used for guest traffic in the cluster. This would override the zone-wide traffic label setting.")
+    private String vSwitchNameGuestTraffic;
+
+    @Parameter(name = ApiConstants.VSWITCH_NAME_PUBLIC_TRAFFIC, type = CommandType.STRING, required = false, description = "Name of virtual switch used for public traffic in the cluster. This would override the zone-wide traffic label setting.")
+    private String vSwitchNamePublicTraffic;
+
+ public String getVSwitchTypeGuestTraffic() {
+ return vSwitchTypeGuestTraffic;
+ }
+
+ public String getVSwitchTypePublicTraffic() {
+ return vSwitchTypePublicTraffic;
+ }
+
+ public String getVSwitchNameGuestTraffic() {
+ return vSwitchNameGuestTraffic;
+ }
+
+ public String getVSwitchNamePublicTraffic() {
+ return vSwitchNamePublicTraffic;
+ }
+
public String getVSMIpaddress() {
return vsmipaddress;
}
diff --git a/api/src/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java b/api/src/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java
index da9d3467792..b86784ed0b0 100644
--- a/api/src/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java
+++ b/api/src/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java
@@ -49,7 +49,7 @@ public class CreateStoragePoolCmd extends BaseCmd {
/////////////////////////////////////////////////////
@Parameter(name=ApiConstants.CLUSTER_ID, type=CommandType.UUID, entityType = ClusterResponse.class,
- required=true, description="the cluster ID for the storage pool")
+ description="the cluster ID for the storage pool")
private Long clusterId;
@Parameter(name=ApiConstants.DETAILS, type=CommandType.MAP, description="the details for the storage pool")
@@ -59,7 +59,7 @@ public class CreateStoragePoolCmd extends BaseCmd {
private String storagePoolName;
@Parameter(name=ApiConstants.POD_ID, type=CommandType.UUID, entityType = PodResponse.class,
- required=true, description="the Pod ID for the storage pool")
+ description="the Pod ID for the storage pool")
private Long podId;
@Parameter(name=ApiConstants.TAGS, type=CommandType.STRING, description="the tags for the storage pool")
diff --git a/api/src/org/apache/cloudstack/api/command/user/zone/ListZonesByCmd.java b/api/src/org/apache/cloudstack/api/command/user/zone/ListZonesByCmd.java
index 97fe2ffeb90..bbfb598b1db 100644
--- a/api/src/org/apache/cloudstack/api/command/user/zone/ListZonesByCmd.java
+++ b/api/src/org/apache/cloudstack/api/command/user/zone/ListZonesByCmd.java
@@ -53,6 +53,9 @@ public class ListZonesByCmd extends BaseListCmd {
description="the ID of the domain associated with the zone")
private Long domainId;
+ @Parameter(name=ApiConstants.NAME, type=CommandType.STRING, description="the name of the zone")
+ private String name;
+
@Parameter(name=ApiConstants.SHOW_CAPACITIES, type=CommandType.BOOLEAN, description="flag to display the capacity of the zones")
private Boolean showCapacities;
@@ -72,6 +75,10 @@ public class ListZonesByCmd extends BaseListCmd {
return domainId;
}
+ public String getName(){
+ return name;
+ }
+
public Boolean getShowCapacities() {
return showCapacities;
}
diff --git a/awsapi/pom.xml b/awsapi/pom.xml
index 8e07f9e2124..f19a71381d3 100644
--- a/awsapi/pom.xml
+++ b/awsapi/pom.xml
@@ -354,7 +354,7 @@
+ value="${cs.replace.properties}" />
diff --git a/client/WEB-INF/classes/resources/messages.properties b/client/WEB-INF/classes/resources/messages.properties
index a0ed7c9a277..d167a5c0a4f 100644
--- a/client/WEB-INF/classes/resources/messages.properties
+++ b/client/WEB-INF/classes/resources/messages.properties
@@ -17,6 +17,13 @@
#new labels (begin) **********************************************************************************************
+label.menu.regions=Regions
+label.region=Region
+label.add.region=Add Region
+label.remove.region=Remove Region
+message.remove.region=Are you sure you want to remove this region from this management server?
+message.add.region=Please specify the required information to add a new region.
+label.endpoint=Endpoint
label.plugins=Plugins
label.plugin.details=Plugin details
label.author.name=Author name
diff --git a/client/pom.xml b/client/pom.xml
index b485be53805..8e1ad2bc064 100644
--- a/client/pom.xml
+++ b/client/pom.xml
@@ -308,7 +308,7 @@
+ value="${cs.replace.properties}" />
@@ -319,7 +319,7 @@
-
+
@@ -330,7 +330,7 @@
-
+
@@ -341,7 +341,7 @@
-
+
@@ -351,10 +351,26 @@
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/client/tomcatconf/classpath.conf.in b/client/tomcatconf/classpath.conf.in
index 3ae0fb4d778..5625f69ed33 100644
--- a/client/tomcatconf/classpath.conf.in
+++ b/client/tomcatconf/classpath.conf.in
@@ -16,23 +16,11 @@
# specific language governing permissions and limitations
# under the License.
-SYSTEMJARS="@SYSTEMJARS@"
-SCP=$(build-classpath $SYSTEMJARS 2>/dev/null) ; if [ $? != 0 ] ; then export SCP="@SYSTEMCLASSPATH@" ; fi
-MCP="@MSCLASSPATH@"
-DCP="@DEPSCLASSPATH@"
-CLASSPATH=$SCP:$DCP:$MCP:@MSCONF@:@SETUPDATADIR@
-for jarfile in "@PREMIUMJAVADIR@"/* ; do
- if [ ! -e "$jarfile" ] ; then continue ; fi
- CLASSPATH=$jarfile:$CLASSPATH
-done
-for plugin in "@PLUGINJAVADIR@"/* ; do
- if [ ! -e "$plugin" ] ; then continue ; fi
- CLASSPATH=$plugin:$CLASSPATH
-done
-for vendorconf in "@MSCONF@"/vendor/* ; do
- if [ ! -d "$vendorconf" ] ; then continue ; fi
- CLASSPATH=$vendorconf:$CLASSPATH
+# We use WEB-INF/lib in the webapp for including things into
+# the classpath nowadays
+
+for jar in /usr/share/tomcat6/lib/*.jar; do
+ CLASSPATH=$jar:$CLASSPATH
done
+CLASSPATH=${CLASSPATH}/usr/share/cloudstack-management/webapps/client/WEB-INF/lib/mysql-connector-java-5.1.21.jar
export CLASSPATH
-PATH=/sbin:/usr/sbin:$PATH
-export PATH
diff --git a/client/tomcatconf/componentContext.xml.in b/client/tomcatconf/componentContext.xml.in
index d31c32cecfd..fd8bf283b0e 100644
--- a/client/tomcatconf/componentContext.xml.in
+++ b/client/tomcatconf/componentContext.xml.in
@@ -48,7 +48,8 @@
-
+
+
@@ -113,12 +114,15 @@
-
+
+
-
+
diff --git a/client/tomcatconf/components.xml.in b/client/tomcatconf/components.xml.in
index c62abe8ff2a..7d86a1c2cb9 100755
--- a/client/tomcatconf/components.xml.in
+++ b/client/tomcatconf/components.xml.in
@@ -66,10 +66,6 @@ under the License.
-
-
-
-
diff --git a/client/tomcatconf/nonossComponentContext.xml.in b/client/tomcatconf/nonossComponentContext.xml.in
index 36ca00d735c..2773d54c41c 100644
--- a/client/tomcatconf/nonossComponentContext.xml.in
+++ b/client/tomcatconf/nonossComponentContext.xml.in
@@ -49,7 +49,8 @@
-
+
+
diff --git a/core/src/com/cloud/storage/DiskOfferingVO.java b/core/src/com/cloud/storage/DiskOfferingVO.java
index 5f4f18bcd34..e4fc21c7c13 100755
--- a/core/src/com/cloud/storage/DiskOfferingVO.java
+++ b/core/src/com/cloud/storage/DiskOfferingVO.java
@@ -311,4 +311,8 @@ public class DiskOfferingVO implements DiskOffering {
public int getSortKey() {
return sortKey;
}
+
+ public void setRecreatable(boolean recreatable) {
+ this.recreatable = recreatable;
+ }
}
diff --git a/core/src/com/cloud/storage/template/RawImageProcessor.java b/core/src/com/cloud/storage/template/RawImageProcessor.java
index 7833eabcabf..a002df5c9b2 100644
--- a/core/src/com/cloud/storage/template/RawImageProcessor.java
+++ b/core/src/com/cloud/storage/template/RawImageProcessor.java
@@ -57,6 +57,7 @@ public class RawImageProcessor extends AdapterBase implements Processor {
String imgPath = templatePath + File.separator + templateName + "." + ImageFormat.RAW.getFileExtension();
if (!_storage.exists(imgPath)) {
s_logger.debug("Unable to find raw image:" + imgPath);
+ return null;
}
FormatInfo info = new FormatInfo();
info.format = ImageFormat.RAW;
diff --git a/debian/README b/debian/README
deleted file mode 100644
index cbfbf1bb9d9..00000000000
--- a/debian/README
+++ /dev/null
@@ -1,6 +0,0 @@
-The Debian Package
-----------------------------
-
-This is part of the Cloud Stack collection of packages.
-
- -- Manuel Amador (Rudd-O) Thu, 25 Mar 2010 15:12:06 -0700
diff --git a/debian/changelog b/debian/changelog
index c3243aad5e3..cbbaad32cad 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+cloudstack (4.1.0-incubating-0.0.snapshot) unstable; urgency=low
+
+ * Incorporate incubating into version, remove epoch
+
+ -- Noa Resare Tue, 05 Feb 2013 18:05:28 +0000
+
cloud (1:4.0.0-1) unstable; urgency=low
* Bumping the version to 4.0.0
diff --git a/debian/cloud-agent.config b/debian/cloud-agent.config
deleted file mode 100644
index 00ae6c00d2e..00000000000
--- a/debian/cloud-agent.config
+++ /dev/null
@@ -1,17 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
diff --git a/debian/cloud-cli.install b/debian/cloud-cli.install
deleted file mode 100644
index ce178461150..00000000000
--- a/debian/cloud-cli.install
+++ /dev/null
@@ -1,21 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-/etc/cloud/cli/commands.xml
-/usr/bin/cloud-grab-dependent-library-versions
-/usr/bin/cloud-tool
-/usr/bin/cloudvoladm
diff --git a/debian/cloud-client-ui.install b/debian/cloud-client-ui.install
deleted file mode 100644
index ba1408afcc5..00000000000
--- a/debian/cloud-client-ui.install
+++ /dev/null
@@ -1,19 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-/usr/share/cloud/management/webapps/client/*
-
diff --git a/debian/cloud-client.install b/debian/cloud-client.install
deleted file mode 100644
index aadb145ae1c..00000000000
--- a/debian/cloud-client.install
+++ /dev/null
@@ -1,58 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-/etc/cloud/management/catalina.policy
-/etc/cloud/management/catalina.properties
-/etc/cloud/management/commands.properties
-/etc/cloud/management/components.xml
-/etc/cloud/management/context.xml
-/etc/cloud/management/db.properties
-/etc/cloud/management/environment.properties
-/etc/cloud/management/ehcache.xml
-/etc/cloud/management/log4j-cloud.xml
-/etc/cloud/management/logging.properties
-/etc/cloud/management/server.xml
-/etc/cloud/management/tomcat6.conf
-/etc/cloud/management/classpath.conf
-/etc/cloud/management/tomcat-users.xml
-/etc/cloud/management/web.xml
-/etc/cloud/management/server-nonssl.xml
-/etc/cloud/management/tomcat6-nonssl.conf
-/etc/cloud/management/virtualrouter_commands.properties
-/etc/cloud/management/f5bigip_commands.properties
-/etc/cloud/management/junipersrx_commands.properties
-/etc/cloud/management/netscalerloadbalancer_commands.properties
-/etc/cloud/management/cisconexusvsm_commands.properties
-/etc/cloud/management/Catalina
-/etc/cloud/management/Catalina/localhost
-/etc/cloud/management/Catalina/localhost/client
-/etc/init.d/cloud-management
-/usr/share/cloud/management/bin
-/usr/share/cloud/management/conf
-/usr/share/cloud/management/lib
-/usr/share/cloud/management/logs
-/usr/share/cloud/management/temp
-/usr/share/cloud/management/work
-/var/cache/cloud/management
-/var/cache/cloud/management/work
-/var/cache/cloud/management/temp
-/var/log/cloud/management
-/var/lib/cloud/mnt
-/var/lib/cloud/management
-/usr/bin/cloud-setup-management
-/usr/bin/cloud-update-xenserver-licenses
-/etc/cloud/management/commands-ext.properties
diff --git a/debian/cloud-client.postinst b/debian/cloud-client.postinst
deleted file mode 100644
index 87c7610320c..00000000000
--- a/debian/cloud-client.postinst
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/sh -e
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-case "$1" in
- configure)
- if ! id cloud > /dev/null 2>&1 ; then
- adduser --system --home /var/lib/cloud/management --no-create-home \
- --group --disabled-password --shell /bin/sh cloud
- # update me in all the .postinst that you can find me in, as well
- fi
-
- for i in /var/lib/cloud/mnt /var/cache/cloud/management \
- /var/cache/cloud/management/work /var/cache/cloud/management/temp \
- /var/log/cloud/management /etc/cloud/management/Catalina \
- /etc/cloud/management/Catalina/localhost /var/lib/cloud/management /etc/cloud/management/Catalina/localhost/client
- do
- chmod 0770 $i
- chgrp cloud $i
- done
-
- for i in /etc/cloud/management/db.properties
- do
- chmod 0640 $i
- chgrp cloud $i
- done
-
- if [ "$2" = "" ] ; then # no recently configured version, this is a first install
- /usr/sbin/update-rc.d cloud-management defaults || true
- fi
-
- ;;
-esac
-
-#DEBHELPER#
diff --git a/debian/cloud-core.install b/debian/cloud-core.install
deleted file mode 100644
index 00a43d48259..00000000000
--- a/debian/cloud-core.install
+++ /dev/null
@@ -1,19 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-/usr/share/java/cloud-core.jar
-
diff --git a/debian/cloud-deps.install b/debian/cloud-deps.install
deleted file mode 100644
index 74aade12543..00000000000
--- a/debian/cloud-deps.install
+++ /dev/null
@@ -1,34 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-/usr/share/java/ehcache-1.5.0.jar
-/usr/share/java/mail-1.4.jar
-/usr/share/java/httpcore-4.0.jar
-/usr/share/java/log4j-*.jar
-/usr/share/java/apache-log4j-extras-1.1.jar
-/usr/share/java/trilead-ssh2-build213-svnkit-1.3-patch.jar
-/usr/share/java/xmlrpc-common-3.*.jar
-/usr/share/java/xmlrpc-client-3.*.jar
-/usr/share/java/jstl-1.2.jar
-/usr/share/java/axis2-1.5.1.jar
-/usr/share/java/wsdl4j-1.6.2.jar
-/usr/share/java/bcprov-*.jar
-/usr/share/java/jasypt-1.*.jar
-/usr/share/java/ejb-api-3.0.jar
-/usr/share/java/javax.persistence-2.0.0.jar
-/usr/share/java/gson-1.7.1.jar
-/usr/share/java/xapi-5.6.100-1-SNAPSHOT.jar
diff --git a/debian/cloud-python.install b/debian/cloud-python.install
deleted file mode 100644
index b8eac722a6d..00000000000
--- a/debian/cloud-python.install
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-/usr/lib/python*/dist-packages/cloud*
diff --git a/debian/cloud-scripts.install b/debian/cloud-scripts.install
deleted file mode 100644
index 5e8896d43a3..00000000000
--- a/debian/cloud-scripts.install
+++ /dev/null
@@ -1,27 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-/usr/lib/cloud/common/scripts/installer/*
-/usr/lib/cloud/common/scripts/network/*
-/usr/lib/cloud/common/scripts/storage/*
-/usr/lib/cloud/common/scripts/util/*
-/usr/lib/cloud/common/scripts/vm/network/*
-/usr/lib/cloud/common/scripts/vm/systemvm/*
-/usr/lib/cloud/common/scripts/vm/pingtest.sh
-/usr/lib/cloud/common/scripts/vm/hypervisor/kvm/*
-/usr/lib/cloud/common/scripts/vm/hypervisor/versions.sh
-/usr/lib/cloud/common/scripts/vm/hypervisor/xenserver/*
diff --git a/debian/cloud-server.install b/debian/cloud-server.install
deleted file mode 100644
index f792cc2f7cd..00000000000
--- a/debian/cloud-server.install
+++ /dev/null
@@ -1,32 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-/usr/share/java/cloud-server.jar
-/usr/share/java/cloud-ovm.jar
-/etc/cloud/server/*
-/usr/share/java/cloud-dp-user-concentrated-pod.jar
-/usr/share/java/cloud-dp-user-dispersing.jar
-/usr/share/java/cloud-host-allocator-random.jar
-/usr/share/java/cloud-plugin-elb.jar
-/usr/share/java/cloud-plugin-ovs.jar
-/usr/share/java/cloud-plugin-nicira-nvp.jar
-/usr/share/java/cloud-plugin-bigswitch-vns.jar
-/usr/share/java/cloud-storage-allocator-random.jar
-/usr/share/java/cloud-user-authenticator-ldap.jar
-/usr/share/java/cloud-user-authenticator-md5.jar
-/usr/share/java/cloud-user-authenticator-plaintext.jar
-/usr/share/java/cloud-plugin-hypervisor-xen.jar
diff --git a/debian/cloud-system-iso.install b/debian/cloud-system-iso.install
deleted file mode 100644
index 5a0b6364f4c..00000000000
--- a/debian/cloud-system-iso.install
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-/usr/lib/cloud/common/vms/systemvm.iso
diff --git a/debian/cloud-usage.install b/debian/cloud-usage.install
deleted file mode 100644
index 22f58344102..00000000000
--- a/debian/cloud-usage.install
+++ /dev/null
@@ -1,23 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-/usr/share/java/cloud-usage.jar
-/etc/init.d/cloud-usage
-/var/log/cloud/usage
-/etc/cloud/usage/usage-components.xml
-/etc/cloud/usage/log4j-cloud_usage.xml
-/etc/cloud/usage/db.properties
diff --git a/debian/cloud-usage.postinst b/debian/cloud-usage.postinst
deleted file mode 100644
index 56f895df6e0..00000000000
--- a/debian/cloud-usage.postinst
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/sh -e
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-case "$1" in
- configure)
-
- if ! id cloud > /dev/null 2>&1 ; then
- adduser --system --home /var/lib/cloud/management --no-create-home \
- --group --disabled-password --shell /bin/sh cloud
- # update me in cloud-client.postinst as well
- fi
-
- for i in /var/log/cloud/usage
- do
- chmod 0770 $i
- chgrp cloud $i
- done
-
- for i in /etc/cloud/usage/db.properties
- do
- chmod 0640 $i
- chgrp cloud $i
- done
-
- if [ "$2" = "" ] ; then # no recently configured version, this is a first install
- /usr/sbin/update-rc.d cloud-usage defaults || true
- fi
-
- ;;
-esac
-
-#DEBHELPER#
diff --git a/debian/cloud-utils.install b/debian/cloud-utils.install
deleted file mode 100644
index 39c357a1fa6..00000000000
--- a/debian/cloud-utils.install
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-/usr/share/java/cloud-utils.jar
-/usr/share/java/cloud-api.jar
-/usr/share/doc/cloud/version-info
-/usr/bin/cloud-sccs
-/usr/bin/cloud-gitrevs
diff --git a/debian/cloud-agent.install b/debian/cloudstack-agent.install
similarity index 92%
rename from debian/cloud-agent.install
rename to debian/cloudstack-agent.install
index c67e90ab492..84eee10b578 100644
--- a/debian/cloud-agent.install
+++ b/debian/cloudstack-agent.install
@@ -16,10 +16,10 @@
# under the License.
/etc/cloud/agent/agent.properties
-/etc/cloud/agent/developer.properties.template
/etc/cloud/agent/environment.properties
/etc/cloud/agent/log4j-cloud.xml
/etc/init.d/cloud-agent
/usr/bin/cloud-setup-agent
/usr/bin/cloud-ssh
-/var/log/cloud/agent
+/var/log/cloudstack/agent
+/usr/share/cloudstack-agent/lib/cloudstack-agent-kvm.jar
diff --git a/debian/cloud-client.config b/debian/cloudstack-awsapi.install
similarity index 96%
rename from debian/cloud-client.config
rename to debian/cloudstack-awsapi.install
index 00ae6c00d2e..02ba66829ef 100644
--- a/debian/cloud-client.config
+++ b/debian/cloudstack-awsapi.install
@@ -15,3 +15,4 @@
# specific language governing permissions and limitations
# under the License.
+/var/log/cloudstack/awsapi
\ No newline at end of file
diff --git a/debian/cloud-cli.config b/debian/cloudstack-cli.install
similarity index 97%
rename from debian/cloud-cli.config
rename to debian/cloudstack-cli.install
index 00ae6c00d2e..287f9b1f651 100644
--- a/debian/cloud-cli.config
+++ b/debian/cloudstack-cli.install
@@ -13,5 +13,4 @@
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
-# under the License.
-
+# under the License.
\ No newline at end of file
diff --git a/debian/cloud-agent-libs.install b/debian/cloudstack-common.install
similarity index 50%
rename from debian/cloud-agent-libs.install
rename to debian/cloudstack-common.install
index ba25935588c..de97ee9383a 100644
--- a/debian/cloud-agent-libs.install
+++ b/debian/cloudstack-common.install
@@ -15,5 +15,19 @@
# specific language governing permissions and limitations
# under the License.
-/usr/share/java/cloud-agent.jar
-/usr/share/java/cloud-plugin-hypervisor-kvm.jar
+/usr/share/cloudstack-common/lib/cloudstack-core.jar
+/usr/share/cloudstack-common/lib/cloudstack-api.jar
+/usr/share/cloudstack-common/vms/systemvm.iso
+/usr/share/cloudstack-common/scripts/installer/*
+/usr/share/cloudstack-common/scripts/network/*
+/usr/share/cloudstack-common/scripts/storage/*
+/usr/share/cloudstack-common/scripts/util/*
+/usr/share/cloudstack-common/scripts/vm/network/*
+/usr/share/cloudstack-common/scripts/vm/systemvm/*
+/usr/share/cloudstack-common/scripts/vm/pingtest.sh
+/usr/share/cloudstack-common/scripts/vm/hypervisor/kvm/*
+/usr/share/cloudstack-common/scripts/vm/hypervisor/versions.sh
+/usr/share/cloudstack-common/scripts/vm/hypervisor/xenserver/*
+/usr/bin/cloud-set-guest-password
+/usr/bin/cloud-set-guest-sshkey
+/usr/lib/python2.?/*-packages/*
diff --git a/debian/cloud-management.config b/debian/cloudstack-docs.install
similarity index 97%
rename from debian/cloud-management.config
rename to debian/cloudstack-docs.install
index 00ae6c00d2e..287f9b1f651 100644
--- a/debian/cloud-management.config
+++ b/debian/cloudstack-docs.install
@@ -13,5 +13,4 @@
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
-# under the License.
-
+# under the License.
\ No newline at end of file
diff --git a/debian/cloud-setup.install b/debian/cloudstack-management.install
similarity index 69%
rename from debian/cloud-setup.install
rename to debian/cloudstack-management.install
index 5c37c64920d..e87b4446bd0 100644
--- a/debian/cloud-setup.install
+++ b/debian/cloudstack-management.install
@@ -15,11 +15,17 @@
# specific language governing permissions and limitations
# under the License.
+/etc/cloud/server/*
+/etc/cloud/management/*
+/etc/init.d/cloud-management
+/var/cache/cloudstack/management
+/var/cache/cloudstack/management/work
+/var/cache/cloudstack/management/temp
+/var/log/cloudstack/management
+/var/lib/cloud/mnt
+/var/lib/cloud/management
+/usr/bin/cloud-update-xenserver-licenses
+/usr/bin/cloud-setup-management
/usr/bin/cloud-setup-databases
/usr/bin/cloud-migrate-databases
-/usr/bin/cloud-set-guest-password
-/usr/bin/cloud-set-guest-sshkey
-/usr/share/cloud/setup/*.sql
-/usr/share/cloud/setup/*.sh
-/usr/share/cloud/setup/server-setup.xml
-/usr/share/cloud/setup/db/*.sql
+/usr/share/cloudstack-management/*
diff --git a/debian/cloud-agent.postinst b/debian/cloudstack-management.postinst
similarity index 77%
rename from debian/cloud-agent.postinst
rename to debian/cloudstack-management.postinst
index f022f6d16bc..6fe01453c10 100644
--- a/debian/cloud-agent.postinst
+++ b/debian/cloudstack-management.postinst
@@ -16,19 +16,11 @@
# specific language governing permissions and limitations
# under the License.
-case "$1" in
- configure)
-
- for i in /var/log/cloud/agent
- do
- chmod 0770 $i
- done
-
- if [ "$2" = "" ] ; then # no recently configured version, this is a first install
- /usr/sbin/update-rc.d cloud-agent defaults || true
- fi
-
- ;;
-esac
+if [ "$1" = configure ]; then
+ if ! getent passwd cloud >/dev/null; then
+ adduser --quiet --system --group --no-create-home --home /usr/share/cloudstack-manager cloud
+ fi
+ chown cloud /var/log/cloudstack/management
+fi
#DEBHELPER#
diff --git a/debian/cloud-agent-deps.install b/debian/cloudstack-usage.install
similarity index 86%
rename from debian/cloud-agent-deps.install
rename to debian/cloudstack-usage.install
index b05b7d1d9d4..2696c20b5e9 100644
--- a/debian/cloud-agent-deps.install
+++ b/debian/cloudstack-usage.install
@@ -5,9 +5,9 @@
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
-#
+#
# http://www.apache.org/licenses/LICENSE-2.0
-#
+#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -15,4 +15,7 @@
# specific language governing permissions and limitations
# under the License.
-/usr/share/java/libvirt-0.4.9.jar
+/usr/share/cloudstack-usage/lib/cloudstack-usage.jar
+/etc/init.d/cloud-usage
+/var/log/cloudstack/usage
+/etc/cloud/usage/*
diff --git a/debian/control b/debian/control
index e9697ea5e26..659084c38ae 100644
--- a/debian/control
+++ b/debian/control
@@ -1,118 +1,51 @@
-Source: cloud
+Source: cloudstack
Section: libs
Priority: extra
Maintainer: Wido den Hollander
-Build-Depends: debhelper (>= 7), openjdk-6-jdk, tomcat6, libws-commons-util-java, libcommons-codec-java (>= 1.5), libcommons-httpclient-java (>= 3.1), libservlet2.5-java, genisoimage, python-mysqldb, maven3 | maven (>= 3), liblog4j1.2-java (>= 1.2.16)
+Build-Depends: debhelper (>= 7), openjdk-6-jdk, tomcat6, genisoimage,
+ python-mysqldb, maven3 | maven (>= 3), python (>= 2.6.6-3~)
Standards-Version: 3.8.1
Homepage: http://www.cloudstack.org/
-Package: cloud-deps
-Architecture: any
-Depends: openjdk-6-jre, libcommons-codec-java (>= 1.5), libcommons-httpclient-java (>= 3.1)
-Description: CloudStack library dependencies
- This package contains a number of third-party dependencies
- not shipped by distributions, required to run the CloudStack
- Management Server.
+Package: cloudstack-common
+Architecture: all
+Depends: bash, genisoimage
+Conflicts: cloud-scripts, cloud-utils, cloud-system-iso, cloud-console-proxy, cloud-daemonize, cloud-deps, cloud-python, cloud-setup
+Description: A common package which contains files shared by several CloudStack packages
-Package: cloud-agent-deps
-Architecture: any
-Depends: openjdk-6-jre, cloud-deps (= ${source:Version})
-Description: CloudStack agent library dependencies
- This package contains a number of third-party dependencies
- not shipped by distributions, required to run the CloudStack
- Agent.
-
-Package: cloud-utils
-Architecture: any
-Depends: openjdk-6-jre, python, libcglib-java (>= 2.2.2), libjsch-java (>= 0.1.42), libbackport-util-concurrent-java (>= 3.1), libcommons-dbcp-java (>= 1.4), libcommons-pool-java (>= 1.5.6)
-Description: CloudStack utility library
- The CloudStack utility libraries provide a set of Java classes used
- in the CloudStack environment.
-
-Package: cloud-client-ui
-Architecture: any
-Depends: openjdk-6-jre, cloud-client (= ${source:Version})
-Description: CloudStack management server UI
- The CloudStack management server is the central point of coordination,
- management, and intelligence in the CloudStack Cloud Stack. This package
- is a requirement of the cloud-client package, which installs the
- CloudStack management server.
-
-Package: cloud-server
-Architecture: any
-Depends: openjdk-6-jre, cloud-utils (= ${source:Version}), cloud-core (= ${source:Version}), cloud-deps (= ${source:Version}), cloud-scripts (= ${source:Version}), libservlet2.5-java
+Package: cloudstack-management
+Architecture: all
+Depends: cloudstack-common (= ${source:Version}), tomcat6, sysvinit-utils, chkconfig, sudo, jsvc, python-mysqldb, python-paramiko, augeas-tools
+Conflicts: cloud-server, cloud-client, cloud-client-ui
Description: CloudStack server library
- The CloudStack server libraries provide a set of Java classes used
- in the CloudStack management server.
+ The CloudStack management server
-Package: cloud-scripts
-Replaces: cloud-agent-scripts
-Architecture: any
-Depends: openjdk-6-jre, python, bash, bzip2, gzip, unzip, nfs-common, openssh-client, lsb-release
-Description: CloudStack scripts
- This package contains a number of scripts needed for the CloudStack Agent and Management Server.
- Both the CloudStack Agent and Management server depend on this package
-
-Package: cloud-core
-Architecture: any
-Depends: openjdk-6-jre, cloud-utils (= ${source:Version})
-Description: CloudStack core library
- The CloudStack core libraries provide a set of Java classes used
- in the CloudStack Cloud Stack.
-
-
-Package: cloud-client
-Architecture: any
-Depends: openjdk-6-jre, cloud-deps (= ${source:Version}), cloud-utils (= ${source:Version}), cloud-server (= ${source:Version}), cloud-client-ui (= ${source:Version}), cloud-setup (= ${source:Version}), cloud-python (= ${source:Version}), tomcat6, libws-commons-util-java, sysvinit-utils, chkconfig, sudo, jsvc, python-mysqldb, python-paramiko, augeas-tools, genisoimage, cloud-system-iso, libmysql-java (>= 5.1)
-Description: CloudStack client
- The CloudStack management server is the central point of coordination,
- management, and intelligence in the CloudStack Cloud Stack. This package
- is required for the management server to work.
-
-Package: cloud-setup
-Architecture: any
-Depends: openjdk-6-jre, python, cloud-utils (= ${source:Version}), cloud-deps (= ${source:Version}), cloud-server (= ${source:Version}), cloud-python (= ${source:Version}), python-mysqldb
-Description: CloudStack client
- The CloudStack setup tools let you set up your Management Server and Usage Server.
-
-Package: cloud-python
-Architecture: any
-Depends: python
-Description: CloudStack Python library
- The CloudStack Python library contains a few Python modules that the
- CloudStack uses.
-
-Package: cloud-agent-libs
-Architecture: any
-Depends: openjdk-6-jre, cloud-utils (= ${source:Version}), cloud-core (= ${source:Version}), cloud-agent-deps (= ${source:Version})
-Description: CloudStack agent libraries
- The CloudStack agent libraries are used by the Cloud Agent.
-
-Package: cloud-agent
-Architecture: any
-Depends: openjdk-6-jre, cloud-utils (= ${source:Version}), cloud-core (= ${source:Version}), cloud-agent-deps (= ${source:Version}), cloud-python (= ${source:Version}), cloud-agent-libs (= ${source:Version}), cloud-scripts (= ${source:Version}), cloud-system-iso (= ${source:Version}), libvirt0, sysvinit-utils, chkconfig, qemu-kvm, libvirt-bin, uuid-runtime, rsync, grep, iproute, ebtables, vlan, liblog4j1.2-java (>= 1.2.16), libjna-java, wget, jsvc, lsb-base (>= 3.2)
+Package: cloudstack-agent
+Architecture: all
+Depends: openjdk-6-jre, cloudstack-common (= ${source:Version}), lsb-base (>= 3.2), openssh-client, libvirt0, sysvinit-utils, chkconfig, qemu-kvm, libvirt-bin, uuid-runtime, rsync, grep, iproute, ebtables, vlan, wget, jsvc
+Conflicts: cloud-agent, cloud-agent-libs, cloud-agent-deps, cloud-agent-scripts
Description: CloudStack agent
The CloudStack agent is in charge of managing shared computing resources in
a CloudStack powered cloud. Install this package if this computer
will participate in your cloud as a KVM HyperVisor.
-Package: cloud-system-iso
-Architecture: any
-Description: CloudStack system iso
- The CloudStack agent is in charge of managing shared computing resources in
- a CloudStack powered cloud. Install this package if this computer
- will participate in your cloud.
-
-Package: cloud-usage
-Architecture: any
-Depends: openjdk-6-jre, cloud-utils (= ${source:Version}), cloud-core (= ${source:Version}), cloud-deps (= ${source:Version}), cloud-server (= ${source:Version}), cloud-setup (= ${source:Version}), cloud-client (= ${source:Version}), jsvc
+Package: cloudstack-usage
+Architecture: all
+Depends: openjdk-6-jre, cloudstack-common (= ${source:Version}), jsvc
Description: CloudStack usage monitor
The CloudStack usage monitor provides usage accounting across the entire cloud for
cloud operators to charge based on usage parameters.
-Package: cloud-cli
-Provides: cloud-cli
-Architecture: any
-Depends: openjdk-6-jre, cloud-utils (= ${source:Version}), cloud-core (= ${source:Version}), cloud-deps (= ${source:Version}), cloud-server (= ${source:Version}), cloud-setup (= ${source:Version}), cloud-client (= ${source:Version})
-Description: CloudStack commandline tool
- The CloudStack commandline tool for invoking APi
+Package: cloudstack-awsapi
+Architecture: all
+Depends: cloudstack-common (= ${source:Version}), cloudstack-management (= ${source:Version})
+Description: CloudStack Amazon EC2 API
+
+Package: cloudstack-cli
+Architecture: all
+Depends: cloudstack-common (= ${source:Version})
+Description: The CloudStack CLI called CloudMonkey
+
+Package: cloudstack-docs
+Architecture: all
+Description: The CloudStack documentation
diff --git a/debian/rules b/debian/rules
index 69fba7a07ba..d537d86204b 100755
--- a/debian/rules
+++ b/debian/rules
@@ -10,6 +10,9 @@
# Modified to make a template file for a multi-binary package with separated
# build-arch and build-indep targets by Bill Allombert 2001
+DEBVERS := $(shell dpkg-parsechangelog | sed -n -e 's/^Version: //p')
+VERSION := $(shell echo '$(DEBVERS)' | sed -e 's/^[[:digit:]]*://' -e 's/[~-].*//')
+
# Uncomment this to turn on verbose mode.
export DH_VERBOSE=1
@@ -19,55 +22,118 @@ export DH_OPTIONS
configure: configure-stamp
configure-stamp:
dh_testdir
- cp packaging/debian/replace.properties build/replace.properties
- echo VERSION=$VERSION >> build/replace.properties
+ cp packaging/debian/replace.properties replace.properties.tmp
+ echo VERSION=${VERSION} >> replace.properties.tmp
touch configure-stamp
-build: build-arch
+build: build-indep
-build-arch: build-arch-stamp
-build-arch-stamp: configure-stamp
- mvn package -Dsystemvm
+build-indep: build-indep-stamp
+
+build-indep-stamp: configure
+ mvn package -DskipTests -Dsystemvm \
+ -Dcs.replace.properties=replace.properties.tmp
touch $@
clean:
dh_testdir
dh_testroot
rm -f build-arch-stamp build-indep-stamp configure-stamp
+ rm -f replace.properties.tmp
dh_clean
install:
dh_testdir
dh_testroot
dh_prep -s
- mkdir -p debian/tmp/usr/bin
- mkdir -p debian/tmp/usr/share/cloud/management
- mkdir -p debian/tmp/var/log/cloud
- mkdir debian/tmp/var/log/cloud/managament
- mkdir debian/tmp/var/log/cloud/awsapi
- mkdir debian/tmp/var/log/cloud/agent
- mkdir debian/tmp/var/log/cloud/ipallocator
- mkdir debian/tmp/var/log/cloud/usage
+
+ # Common packages
mkdir -p debian/tmp/etc/cloud
+ mkdir -p debian/tmp/etc/init.d
+ mkdir -p debian/tmp/var/cache/cloudstack
+ mkdir -p debian/tmp/var/log/cloudstack
+ mkdir -p debian/tmp/var/lib/cloud
+ mkdir -p debian/tmp/usr/bin
+ mkdir -p debian/tmp/usr/share/cloud
+ mkdir -p debian/tmp/usr/lib/cloud
+
+ # cloudstack-agent
mkdir debian/tmp/etc/cloud/agent
+ mkdir debian/tmp/var/log/cloudstack/agent
+ install -D plugins/hypervisors/kvm/target/cloud-plugin-hypervisor-kvm-4.1.0-SNAPSHOT.jar debian/tmp/usr/share/cloudstack-agent/lib/cloudstack-agent-kvm.jar
+ install -D packaging/debian/init/cloud-agent debian/tmp/etc/init.d/
+ install -D agent/bindir/cloud-setup-agent.in debian/tmp/usr/bin/cloud-setup-agent
+ install -D agent/bindir/cloud-ssh.in debian/tmp/usr/bin/cloud-ssh
+ cp agent/target/transformed/* debian/tmp/etc/cloud/agent
+
+ # cloudstack-management
mkdir debian/tmp/etc/cloud/server
mkdir debian/tmp/etc/cloud/management
- mkdir debian/tmp/etc/cloud/usage
- mkdir -p debian/tmp/var/cache/cloud
- mkdir debian/tmp/var/cache/cloud/management
- mkdir -p debian/tmp/usr/share/cloud
- mkdir debian/tmp/usr/share/cloud/setup
- mkdir -p debian/tmp/usr/share/cloud/management/webapps/client
-
- cp -r client/target/utilities/scripts/db/* debian/tmp/usr/share/cloud/setup/
- cp -r client/target/cloud-client-ui-*-SNAPSHOT/* debian/tmp/usr/share/cloud/management/webapps/client/
- dh_installdirs -s
- dh_install -s
+ mkdir -p debian/tmp/usr/share/cloudstack-management
+ mkdir -p debian/tmp/usr/share/cloudstack-management/webapps/client
+ mkdir debian/tmp/usr/share/cloudstack-management/setup
+ mkdir debian/tmp/var/log/cloudstack/management
+ mkdir debian/tmp/var/cache/cloudstack/management
+ mkdir debian/tmp/var/cache/cloudstack/management/work
+ mkdir debian/tmp/var/cache/cloudstack/management/temp
+ mkdir debian/tmp/var/log/cloudstack/ipallocator
+ mkdir debian/tmp/var/lib/cloud/management
+ mkdir debian/tmp/var/lib/cloud/mnt
+ cp -r client/target/utilities/scripts/db/* debian/tmp/usr/share/cloudstack-management/setup/
+ cp -r client/target/cloud-client-ui-4.1.0-SNAPSHOT/* debian/tmp/usr/share/cloudstack-management/webapps/client/
+ cp server/target/conf/* debian/tmp/etc/cloud/server/
+ cp client/target/conf/* debian/tmp/etc/cloud/management/
+ ln -s tomcat6-nonssl.conf debian/tmp/etc/cloud/management/tomcat6.conf
+ mkdir -p debian/tmp/etc/cloud/management/Catalina/localhost/client
+ install -D packaging/debian/init/cloud-management debian/tmp/etc/init.d/cloud-management
+ install -D client/bindir/cloud-update-xenserver-licenses.in debian/tmp/usr/bin/cloud-update-xenserver-licenses
+ install -D server/target/cloud-server-4.1.0-SNAPSHOT.jar debian/tmp/usr/share/cloudstack-management/lib/cloudstack-server.jar
+ ln -s /usr/share/tomcat6/bin debian/tmp/usr/share/cloudstack-management/bin
+ ln -s ../../../etc/cloud/management debian/tmp/usr/share/cloudstack-management/conf
+ ln -s /usr/share/tomcat6/lib debian/tmp/usr/share/cloudstack-management/lib
+ ln -s ../../../var/log/cloudstack/management debian/tmp/usr/share/cloudstack-management/logs
+ ln -s ../../../var/cache/cloudstack/management/temp debian/tmp/usr/share/cloudstack-management/temp
+ ln -s ../../../var/cache/cloudstack/management/work debian/tmp/usr/share/cloudstack-management/work
-binary: binary-common
-binary-common:
- dh_testdir
- dh_testroot
+ # cloudstack-common
+ mkdir -p debian/tmp/usr/share/cloudstack-common
+ mkdir debian/tmp/usr/share/cloudstack-common/scripts
+ mkdir debian/tmp/usr/share/cloudstack-common/setup
+ cp -r scripts/installer debian/tmp/usr/share/cloudstack-common/scripts
+ cp -r scripts/network debian/tmp/usr/share/cloudstack-common/scripts
+ cp -r scripts/storage debian/tmp/usr/share/cloudstack-common/scripts
+ cp -r scripts/util debian/tmp/usr/share/cloudstack-common/scripts
+ cp -r scripts/vm debian/tmp/usr/share/cloudstack-common/scripts
+ install -D client/target/utilities/bin/cloud-migrate-databases debian/tmp/usr/bin
+ install -D client/target/utilities/bin/cloud-set-guest-password debian/tmp/usr/bin
+ install -D client/target/utilities/bin/cloud-set-guest-sshkey debian/tmp/usr/bin
+ install -D client/target/utilities/bin/cloud-setup-databases debian/tmp/usr/bin
+ install -D client/target/utilities/bin/cloud-setup-management debian/tmp/usr/bin
+ install -D console-proxy/dist/systemvm.iso debian/tmp/usr/share/cloudstack-common/vms/systemvm.iso
+ install -D core/target/cloud-core-4.1.0-SNAPSHOT.jar debian/tmp/usr/share/cloudstack-common/lib/cloudstack-core.jar
+ install -D api/target/cloud-api-4.1.0-SNAPSHOT.jar debian/tmp/usr/share/cloudstack-common/lib/cloudstack-api.jar
+
+ # cloudstack-python
+ mkdir -p debian/tmp/usr/lib/python2.7/dist-packages
+ cp -r python/lib/cloud* debian/tmp/usr/lib/python2.7/dist-packages
+
+ # cloudstack-usage
+ mkdir debian/tmp/etc/cloud/usage
+ mkdir debian/tmp/var/log/cloudstack/usage
+ install -D usage/target/cloud-usage-4.1.0-SNAPSHOT.jar debian/tmp/usr/share/cloudstack-usage/lib/cloudstack-usage.jar
+ cp usage/target/transformed/* debian/tmp/etc/cloud/usage/
+ ln -s ../management/db.properties debian/tmp/etc/cloud/usage/db.properties
+ install -D packaging/debian/init/cloud-usage debian/tmp/etc/init.d
+
+ # cloudstack-awsapi
+ mkdir debian/tmp/var/log/cloudstack/awsapi
+
+ dh_installdirs
+ dh_install
+ dh_python2
+
+binary: install
+ dh_install
dh_installchangelogs
dh_installdocs LICENSE
dh_installdocs DISCLAIMER
diff --git a/docs/en-US/event-framework.xml b/docs/en-US/event-framework.xml
new file mode 100644
index 00000000000..88c45c9033d
--- /dev/null
+++ b/docs/en-US/event-framework.xml
@@ -0,0 +1,110 @@
+
+
+%BOOK_ENTITIES;
+]>
+
+
+
+ Event Notification
+ The event notification framework provides a means for the Management Server components to
+ publish and subscribe to &PRODUCT; events. Event notification is achieved by implementing the
+ concept of an event bus abstraction in the Management Server. An event bus is introduced in the
+ Management Server that allows the &PRODUCT; components and extension plug-ins to subscribe to the
+ events by using the Advanced Message Queuing Protocol (AMQP) client. In &PRODUCT;, a default
+ implementation of event bus is provided as a plug-in that uses the RabbitMQ AMQP client. The
+ AMQP client pushes the published events to a compatible AMQP server. Therefore all the &PRODUCT;
+ events are published to an exchange in the AMQP server.
+ A new event for state change, resource state change, is introduced as part of the event
+ notification framework. Every resource, such as user VM, volume, NIC, network, public IP,
+ snapshot, and template, is associated with a state machine and generates events as part of the
+ state change. That implies that a change in the state of a resource results in a state change
+ event, and the event is published in the corresponding state machine on the event bus. All the
+ &PRODUCT; events (alerts, action events, usage events) and the additional category of resource
+ state change events, are published on to the events bus.
+
+ Use Cases
+ The following are some of the use cases:
+
+
+
+ Usage or Billing Engines: A third-party cloud usage solution can implement a plug-in
+ that connects to &PRODUCT; to subscribe to &PRODUCT; events and generate usage data. The
+ usage data is then consumed by its own usage software.
+
+
+ The AMQP plug-in can place all the events on a message queue; an AMQP message broker
+ can then provide topic-based notification to the subscribers.
+
+
+ A publish-and-subscribe notification service can be implemented as a pluggable service in
+ &PRODUCT; that provides a rich set of APIs for event notification, such as topic-based
+ subscription and notification. Additionally, the pluggable service can deal with
+ multi-tenancy, authentication, and authorization issues.
+
+
+
+ Configuration
+ As a &PRODUCT; administrator, perform the following one-time configuration to enable the
+ event notification framework. The behaviour cannot be changed at run time.
+
+
+
+ Open componentContext.xml.
+
+
+ Define a bean named eventNotificationBus as follows:
+
+
+ name : Specify a name for the bean.
+
+
+ server : The name or the IP address of the RabbitMQ AMQP server.
+
+
+ port : The port on which the RabbitMQ server is running.
+
+
+ username : The username associated with the account to access the RabbitMQ
+ server.
+
+
+ password : The password associated with the username of the account to access the
+ RabbitMQ server.
+
+
+ exchange : The exchange name on the RabbitMQ server where &PRODUCT; events are
+ published.
+ A sample bean is given below:
+ <bean id="eventNotificationBus" class="org.apache.cloudstack.mom.rabbitmq.RabbitMQEventBus">
+ <property name="name" value="eventNotificationBus"/>
+ <property name="server" value="127.0.0.1"/>
+ <property name="port" value="5672"/>
+ <property name="username" value="guest"/>
+ <property name="password" value="guest"/>
+ <property name="exchange" value="cloudstack-events"/>
+ </bean>
+ The eventNotificationBus bean represents the
+ org.apache.cloudstack.mom.rabbitmq.RabbitMQEventBus class.
+
+
+
+
+ Restart the Management Server.
+
+
+
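For illustration, a subscriber on the AMQP side can consume the events published through the bean configured above. The following is a minimal sketch using the RabbitMQ Java client and is not part of this patch; it assumes the plug-in declares cloudstack-events as a topic exchange on the configured broker, and the server-named queue and the catch-all "#" routing key are illustrative choices only.

import com.rabbitmq.client.AMQP;
import com.rabbitmq.client.Channel;
import com.rabbitmq.client.Connection;
import com.rabbitmq.client.ConnectionFactory;
import com.rabbitmq.client.DefaultConsumer;
import com.rabbitmq.client.Envelope;

public class CloudStackEventSubscriber {
    public static void main(String[] args) throws Exception {
        // Broker settings mirroring the sample eventNotificationBus bean above.
        ConnectionFactory factory = new ConnectionFactory();
        factory.setHost("127.0.0.1");
        factory.setPort(5672);
        factory.setUsername("guest");
        factory.setPassword("guest");

        Connection connection = factory.newConnection();
        Channel channel = connection.createChannel();

        // Bind a server-named queue to the exchange the Management Server publishes to.
        // "#" matches every routing key; a narrower pattern would select a subset of events.
        String queue = channel.queueDeclare().getQueue();
        channel.queueBind(queue, "cloudstack-events", "#");

        channel.basicConsume(queue, true, new DefaultConsumer(channel) {
            @Override
            public void handleDelivery(String consumerTag, Envelope envelope,
                    AMQP.BasicProperties properties, byte[] body) {
                System.out.println(envelope.getRoutingKey() + ": " + new String(body));
            }
        });
    }
}

A narrower routing key pattern would limit the subscription to a subset of events, which is the topic-based filtering described in the use cases above.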
diff --git a/docs/en-US/events.xml b/docs/en-US/events.xml
index 242ff4511ff..49ef86e62b5 100644
--- a/docs/en-US/events.xml
+++ b/docs/en-US/events.xml
@@ -21,11 +21,17 @@
specific language governing permissions and limitations
under the License.
-->
-
- Events
-
-
-
-
+ Events
+ An event is essentially a significant or meaningful change in the state of both virtual and
+ physical resources associated with a cloud environment. Events are used by monitoring systems,
+ usage and billing systems, or any other event-driven workflow systems to discern a pattern and
+ make the right business decision. In &PRODUCT; an event could be a state change of virtual or
+ psychical resources, an action performed by an user (action events), or policy based events
+ (alerts).
+
+
+
+
+
diff --git a/docs/publican-cloudstack/defaults.cfg b/docs/publican-cloudstack/defaults.cfg
index 6aebaee5a83..9e27bdd309d 100644
--- a/docs/publican-cloudstack/defaults.cfg
+++ b/docs/publican-cloudstack/defaults.cfg
@@ -16,6 +16,6 @@
# specific language governing permissions and limitations
# under the License.
-doc_url: "http://docs.cloudstack.org"
+doc_url: "http://incubator.apache.org/cloudstack/docs"
prod_url: "http://cloudstack.org"
diff --git a/docs/publican-cloudstack/en-US/Legal_Notice.xml b/docs/publican-cloudstack/en-US/Legal_Notice.xml
index 0e4be5bf56a..5e30efb07c0 100644
--- a/docs/publican-cloudstack/en-US/Legal_Notice.xml
+++ b/docs/publican-cloudstack/en-US/Legal_Notice.xml
@@ -53,6 +53,15 @@
completeness or stability of the code, it does indicate that the project
has yet to be fully endorsed by the ASF.
+
+
+ CloudStack® is a registered trademark of the Apache Software Foundation.
+
+
+
+ Apache CloudStack, the CloudStack word design, the Apache CloudStack word design, and the cloud monkey logo are trademarks of the
+ Apache Software Foundation.
+
diff --git a/docs/publican-cloudstack/en-US/images/title_logo.svg b/docs/publican-cloudstack/en-US/images/title_logo.svg
index a5888de4cf4..1d2913bf50c 100644
--- a/docs/publican-cloudstack/en-US/images/title_logo.svg
+++ b/docs/publican-cloudstack/en-US/images/title_logo.svg
@@ -1,5 +1,6 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/engine/storage/integration-test/test/resource/storageContext.xml b/engine/storage/integration-test/test/resource/storageContext.xml
index 4f55e243bac..7c5382d49f9 100644
--- a/engine/storage/integration-test/test/resource/storageContext.xml
+++ b/engine/storage/integration-test/test/resource/storageContext.xml
@@ -23,10 +23,8 @@
-
-
-
+
@@ -34,50 +32,12 @@
-
-
-
-
-
-
+
-
-
-
-
-
-
-
-
- org.apache.cloudstack.framework
-
-
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/engine/storage/src/org/apache/cloudstack/storage/HypervsiorHostEndPointRpcServer.java b/engine/storage/src/org/apache/cloudstack/storage/HypervsiorHostEndPointRpcServer.java
index b709991ee57..f441f39ddfa 100644
--- a/engine/storage/src/org/apache/cloudstack/storage/HypervsiorHostEndPointRpcServer.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/HypervsiorHostEndPointRpcServer.java
@@ -40,24 +40,24 @@ public class HypervsiorHostEndPointRpcServer implements HostEndpointRpcServer {
private static final Logger s_logger = Logger.getLogger(HypervsiorHostEndPointRpcServer.class);
@Inject
- private RpcProvider _rpcProvider;
+ private RpcProvider rpcProvider;
public HypervsiorHostEndPointRpcServer() {
}
public HypervsiorHostEndPointRpcServer(RpcProvider rpcProvider) {
- _rpcProvider = rpcProvider;
- _rpcProvider.registerRpcServiceEndpoint(RpcServiceDispatcher.getDispatcher(this));
+ this.rpcProvider = rpcProvider;
+ this.rpcProvider.registerRpcServiceEndpoint(RpcServiceDispatcher.getDispatcher(this));
}
@PostConstruct
public void Initialize() {
- _rpcProvider.registerRpcServiceEndpoint(RpcServiceDispatcher.getDispatcher(this));
+ rpcProvider.registerRpcServiceEndpoint(RpcServiceDispatcher.getDispatcher(this));
}
@Override
public void sendCommandAsync(HypervisorHostEndPoint host, final Command command, final AsyncCompletionCallback callback) {
- _rpcProvider.newCall(host.getHostAddr()).addCallbackListener(new RpcCallbackListener<Answer>() {
+ rpcProvider.newCall(host.getHostAddr()).addCallbackListener(new RpcCallbackListener<Answer>() {
@Override
public void onSuccess(Answer result) {
callback.complete(result);
diff --git a/engine/storage/src/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java
new file mode 100755
index 00000000000..4c5f0e6cccf
--- /dev/null
+++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java
@@ -0,0 +1,192 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.storage.allocator;
+
+import java.math.BigDecimal;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+
+import javax.inject.Inject;
+import javax.naming.ConfigurationException;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
+import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
+import org.apache.log4j.Logger;
+
+import com.cloud.configuration.dao.ConfigurationDao;
+import com.cloud.dc.ClusterVO;
+import com.cloud.dc.dao.ClusterDao;
+import com.cloud.deploy.DeploymentPlan;
+import com.cloud.deploy.DeploymentPlanner.ExcludeList;
+import com.cloud.storage.DiskOfferingVO;
+import com.cloud.storage.Storage.StoragePoolType;
+import com.cloud.storage.StorageManager;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.Volume;
+import com.cloud.storage.Volume.Type;
+import com.cloud.storage.dao.DiskOfferingDao;
+import com.cloud.storage.dao.StoragePoolDao;
+import com.cloud.storage.dao.VolumeDao;
+import com.cloud.user.Account;
+import com.cloud.utils.NumbersUtil;
+import com.cloud.utils.component.AdapterBase;
+import com.cloud.vm.DiskProfile;
+import com.cloud.vm.VirtualMachine;
+import com.cloud.vm.VirtualMachineProfile;
+
+public abstract class AbstractStoragePoolAllocator extends AdapterBase implements StoragePoolAllocator {
+ private static final Logger s_logger = Logger.getLogger(AbstractStoragePoolAllocator.class);
+ @Inject StorageManager storageMgr;
+ protected @Inject StoragePoolDao _storagePoolDao;
+ @Inject VolumeDao _volumeDao;
+ @Inject ConfigurationDao _configDao;
+ @Inject ClusterDao _clusterDao;
+ protected @Inject DataStoreManager dataStoreMgr;
+ protected BigDecimal _storageOverprovisioningFactor = new BigDecimal(1);
+ long _extraBytesPerVolume = 0;
+ Random _rand;
+ boolean _dontMatter;
+ protected String _allocationAlgorithm = "random";
+ @Inject
+ DiskOfferingDao _diskOfferingDao;
+
+ @Override
+ public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
+ super.configure(name, params);
+
+ Map<String, String> configs = _configDao.getConfiguration(null, params);
+
+ String globalStorageOverprovisioningFactor = configs.get("storage.overprovisioning.factor");
+ _storageOverprovisioningFactor = new BigDecimal(NumbersUtil.parseFloat(globalStorageOverprovisioningFactor, 2.0f));
+
+ _extraBytesPerVolume = 0;
+
+ _rand = new Random(System.currentTimeMillis());
+
+ _dontMatter = Boolean.parseBoolean(configs.get("storage.overwrite.provisioning"));
+
+ String allocationAlgorithm = configs.get("vm.allocation.algorithm");
+ if (allocationAlgorithm != null) {
+ _allocationAlgorithm = allocationAlgorithm;
+ }
+
+ return true;
+ }
+
+ protected abstract List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo);
+
+ @Override
+ public
+ List<StoragePool> allocateToPool(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
+ List<StoragePool> pools = select(dskCh, vmProfile, plan, avoid, returnUpTo);
+ return reOrder(pools, vmProfile, plan);
+ }
+
+ protected List<StoragePool> reorderPoolsByNumberOfVolumes(DeploymentPlan plan, List<StoragePool> pools, Account account) {
+ if(account == null){
+ return pools;
+ }
+ long dcId = plan.getDataCenterId();
+ Long podId = plan.getPodId();
+ Long clusterId = plan.getClusterId();
+
+ List<Long> poolIdsByVolCount = _volumeDao.listPoolIdsByVolumeCount(dcId, podId, clusterId, account.getAccountId());
+ if (s_logger.isDebugEnabled()) {
+ s_logger.debug("List of pools in ascending order of number of volumes for account id: "+ account.getAccountId() + " is: "+ poolIdsByVolCount);
+ }
+
+ //now filter the given list of Pools by this ordered list
+ Map<Long, StoragePool> poolMap = new HashMap<Long, StoragePool>();
+ for (StoragePool pool : pools) {
+ poolMap.put(pool.getId(), pool);
+ }
+ List<Long> matchingPoolIds = new ArrayList<Long>(poolMap.keySet());
+
+ poolIdsByVolCount.retainAll(matchingPoolIds);
+
+ List<StoragePool> reorderedPools = new ArrayList<StoragePool>();
+ for(Long id: poolIdsByVolCount){
+ reorderedPools.add(poolMap.get(id));
+ }
+
+ return reorderedPools;
+ }
+
+ protected List<StoragePool> reOrder(List<StoragePool> pools,
+ VirtualMachineProfile<? extends VirtualMachine> vmProfile,
+ DeploymentPlan plan) {
+ Account account = null;
+ if(vmProfile.getVirtualMachine() != null){
+ account = vmProfile.getOwner();
+ }
+
+ if(_allocationAlgorithm.equals("random") || _allocationAlgorithm.equals("userconcentratedpod_random") || (account == null)) {
+ // Shuffle this so that we don't check the pools in the same order.
+ Collections.shuffle(pools);
+ }else if(_allocationAlgorithm.equals("userdispersing")){
+ pools = reorderPoolsByNumberOfVolumes(plan, pools, account);
+ }
+ return pools;
+ }
+
+ protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh,
+ DeploymentPlan plan) {
+
+ if (s_logger.isDebugEnabled()) {
+ s_logger.debug("Checking if storage pool is suitable, name: " + pool.getName()+ " ,poolId: "+ pool.getId());
+ }
+ if (avoid.shouldAvoid(pool)) {
+ if (s_logger.isDebugEnabled()) {
+ s_logger.debug("StoragePool is in avoid set, skipping this pool");
+ }
+ return false;
+ }
+
+ if(dskCh.getType().equals(Type.ROOT) && pool.getPoolType().equals(StoragePoolType.Iscsi)){
+ if (s_logger.isDebugEnabled()) {
+ s_logger.debug("Disk needed for ROOT volume, but StoragePoolType is Iscsi, skipping this and trying other available pools");
+ }
+ return false;
+ }
+
+ DiskOfferingVO diskOffering = _diskOfferingDao.findById(dskCh.getDiskOfferingId());
+ if (diskOffering.getSystemUse() && pool.getPoolType() == StoragePoolType.RBD) {
+ s_logger.debug("Skipping RBD pool " + pool.getName() + " as a suitable pool. RBD is not supported for System VM's");
+ return false;
+ }
+
+
+ Long clusterId = pool.getClusterId();
+ ClusterVO cluster = _clusterDao.findById(clusterId);
+ if (!(cluster.getHypervisorType() == dskCh.getHypersorType())) {
+ if (s_logger.isDebugEnabled()) {
+ s_logger.debug("StoragePool's Cluster does not have required hypervisorType, skipping this pool");
+ }
+ return false;
+ }
+
+ // check capacity
+ Volume volume = _volumeDao.findById(dskCh.getVolumeId());
+ List<Volume> requestVolumes = new ArrayList<Volume>();
+ requestVolumes.add(volume);
+ return storageMgr.storagePoolHasEnoughSpace(requestVolumes, pool);
+ }
+}
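To make the role of the new base class concrete, the sketch below shows how a plug-in allocator would extend it: a subclass implements select() to produce candidate pools, while allocateToPool() in the base class applies the shared reOrder() and filter() logic. This is an illustration only and not part of this patch; the class name is made up, and the concrete allocators added below follow the same shape.

// Illustrative only: a hypothetical allocator showing how the select() extension
// point of AbstractStoragePoolAllocator is meant to be used.
package org.apache.cloudstack.storage.allocator;

import java.util.ArrayList;
import java.util.List;

import javax.ejb.Local;

import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;

import com.cloud.deploy.DeploymentPlan;
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
import com.cloud.storage.StoragePool;
import com.cloud.vm.DiskProfile;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineProfile;

@Local(value = StoragePoolAllocator.class)
public class ExampleTaggedStoragePoolAllocator extends AbstractStoragePoolAllocator {

    @Override
    protected List<StoragePool> select(DiskProfile dskCh,
            VirtualMachineProfile<? extends VirtualMachine> vmProfile,
            DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
        List<StoragePool> suitablePools = new ArrayList<StoragePool>();

        // Candidate pools come from the inherited DAO; the shared filter()
        // (avoid set, hypervisor match, capacity check) decides suitability.
        List<StoragePoolVO> candidates = _storagePoolDao.findPoolsByTags(
                plan.getDataCenterId(), plan.getPodId(), plan.getClusterId(), dskCh.getTags());

        for (StoragePoolVO candidate : candidates) {
            if (suitablePools.size() == returnUpTo) {
                break;
            }
            StoragePool pool = (StoragePool) dataStoreMgr.getPrimaryDataStore(candidate.getId());
            if (filter(avoid, pool, dskCh, plan)) {
                suitablePools.add(pool);
            }
        }
        return suitablePools;
    }
}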
diff --git a/engine/storage/src/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java
new file mode 100644
index 00000000000..747e2586fed
--- /dev/null
+++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java
@@ -0,0 +1,105 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.storage.allocator;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+import javax.ejb.Local;
+import javax.inject.Inject;
+import javax.naming.ConfigurationException;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.log4j.Logger;
+import org.springframework.stereotype.Component;
+
+import com.cloud.deploy.DeploymentPlan;
+import com.cloud.deploy.DeploymentPlanner.ExcludeList;
+import com.cloud.offering.ServiceOffering;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.dao.DiskOfferingDao;
+import com.cloud.vm.DiskProfile;
+import com.cloud.vm.VirtualMachine;
+import com.cloud.vm.VirtualMachineProfile;
+
+@Component
+@Local(value=StoragePoolAllocator.class)
+public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocator {
+ private static final Logger s_logger = Logger.getLogger(ClusterScopeStoragePoolAllocator.class);
+ protected String _allocationAlgorithm = "random";
+
+ @Inject
+ DiskOfferingDao _diskOfferingDao;
+
+ @Override
+ protected List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
+
+ List<StoragePool> suitablePools = new ArrayList<StoragePool>();
+
+ long dcId = plan.getDataCenterId();
+ Long podId = plan.getPodId();
+ Long clusterId = plan.getClusterId();
+
+ if(dskCh.getTags() != null && dskCh.getTags().length != 0){
+ s_logger.debug("Looking for pools in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId + " having tags:" + Arrays.toString(dskCh.getTags()));
+ }else{
+ s_logger.debug("Looking for pools in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId);
+ }
+
+ List<StoragePoolVO> pools = _storagePoolDao.findPoolsByTags(dcId, podId, clusterId, dskCh.getTags());
+ if (pools.size() == 0) {
+ if (s_logger.isDebugEnabled()) {
+ String storageType = dskCh.useLocalStorage() ? ServiceOffering.StorageType.local.toString() : ServiceOffering.StorageType.shared.toString();
+ s_logger.debug("No storage pools available for " + storageType + " volume allocation, returning");
+ }
+ return suitablePools;
+ }
+
+ for (StoragePoolVO pool: pools) {
+ if(suitablePools.size() == returnUpTo){
+ break;
+ }
+ StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId());
+ if (filter(avoid, pol, dskCh, plan)) {
+ suitablePools.add(pol);
+ }
+ }
+
+ if (s_logger.isDebugEnabled()) {
+ s_logger.debug("ClusterScopeStoragePoolAllocator returning "+suitablePools.size() +" suitable storage pools");
+ }
+
+ return suitablePools;
+ }
+
+ @Override
+ public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
+ super.configure(name, params);
+
+ if (_configDao != null) {
+ Map<String, String> configs = _configDao.getConfiguration(params);
+ String allocationAlgorithm = configs.get("vm.allocation.algorithm");
+ if (allocationAlgorithm != null) {
+ _allocationAlgorithm = allocationAlgorithm;
+ }
+ }
+ return true;
+ }
+}
diff --git a/server/src/com/cloud/storage/allocator/GarbageCollectingStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/GarbageCollectingStoragePoolAllocator.java
similarity index 82%
rename from server/src/com/cloud/storage/allocator/GarbageCollectingStoragePoolAllocator.java
rename to engine/storage/src/org/apache/cloudstack/storage/allocator/GarbageCollectingStoragePoolAllocator.java
index 4eeae280d8b..91bc25c715d 100644
--- a/server/src/com/cloud/storage/allocator/GarbageCollectingStoragePoolAllocator.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/GarbageCollectingStoragePoolAllocator.java
@@ -14,7 +14,7 @@
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
-package com.cloud.storage.allocator;
+package org.apache.cloudstack.storage.allocator;
import java.util.List;
import java.util.Map;
@@ -23,8 +23,8 @@ import javax.ejb.Local;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
+import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import org.apache.log4j.Logger;
-import org.springframework.stereotype.Component;
import com.cloud.configuration.dao.ConfigurationDao;
import com.cloud.deploy.DeploymentPlan;
@@ -36,32 +36,18 @@ import com.cloud.vm.DiskProfile;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineProfile;
-@Component
@Local(value=StoragePoolAllocator.class)
public class GarbageCollectingStoragePoolAllocator extends AbstractStoragePoolAllocator {
private static final Logger s_logger = Logger.getLogger(GarbageCollectingStoragePoolAllocator.class);
StoragePoolAllocator _firstFitStoragePoolAllocator;
StoragePoolAllocator _localStoragePoolAllocator;
- @Inject StorageManager _storageMgr;
+ @Inject StorageManager storageMgr;
@Inject ConfigurationDao _configDao;
boolean _storagePoolCleanupEnabled;
@Override
- public boolean allocatorIsCorrectType(DiskProfile dskCh) {
- return true;
- }
-
- public Integer getStorageOverprovisioningFactor() {
- return null;
- }
-
- public Long getExtraBytesPerVolume() {
- return null;
- }
-
- @Override
- public List<StoragePool> allocateToPool(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
+ public List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
if (!_storagePoolCleanupEnabled) {
s_logger.debug("Storage pool cleanup is not enabled, so GarbageCollectingStoragePoolAllocator is being skipped.");
@@ -69,10 +55,10 @@ public class GarbageCollectingStoragePoolAllocator extends AbstractStoragePoolAl
}
// Clean up all storage pools
- _storageMgr.cleanupStorage(false);
+ storageMgr.cleanupStorage(false);
// Determine what allocator to use
StoragePoolAllocator allocator;
- if (localStorageAllocationNeeded(dskCh)) {
+ if (dskCh.useLocalStorage()) {
allocator = _localStoragePoolAllocator;
} else {
allocator = _firstFitStoragePoolAllocator;
@@ -88,7 +74,7 @@ public class GarbageCollectingStoragePoolAllocator extends AbstractStoragePoolAl
public boolean configure(String name, Map params) throws ConfigurationException {
super.configure(name, params);
- _firstFitStoragePoolAllocator = ComponentContext.inject(FirstFitStoragePoolAllocator.class);
+ _firstFitStoragePoolAllocator = ComponentContext.inject(ClusterScopeStoragePoolAllocator.class);
_firstFitStoragePoolAllocator.configure("GCFirstFitStoragePoolAllocator", params);
_localStoragePoolAllocator = ComponentContext.inject(LocalStoragePoolAllocator.class);
_localStoragePoolAllocator.configure("GCLocalStoragePoolAllocator", params);
diff --git a/engine/storage/src/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java
new file mode 100644
index 00000000000..a8d5173cebe
--- /dev/null
+++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java
@@ -0,0 +1,126 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.storage.allocator;
+
+import java.math.BigDecimal;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import javax.ejb.Local;
+import javax.inject.Inject;
+import javax.naming.ConfigurationException;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.log4j.Logger;
+import org.springframework.stereotype.Component;
+
+import com.cloud.capacity.dao.CapacityDao;
+import com.cloud.configuration.dao.ConfigurationDao;
+import com.cloud.deploy.DeploymentPlan;
+import com.cloud.deploy.DeploymentPlanner.ExcludeList;
+import com.cloud.service.dao.ServiceOfferingDao;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.StoragePoolHostVO;
+import com.cloud.storage.Volume;
+import com.cloud.storage.dao.StoragePoolHostDao;
+import com.cloud.utils.NumbersUtil;
+import com.cloud.vm.DiskProfile;
+import com.cloud.vm.VirtualMachine;
+import com.cloud.vm.VirtualMachineProfile;
+import com.cloud.vm.dao.UserVmDao;
+import com.cloud.vm.dao.VMInstanceDao;
+
+@Component
+@Local(value = StoragePoolAllocator.class)
+public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator {
+ private static final Logger s_logger = Logger.getLogger(LocalStoragePoolAllocator.class);
+
+ @Inject
+ StoragePoolHostDao _poolHostDao;
+ @Inject
+ VMInstanceDao _vmInstanceDao;
+ @Inject
+ UserVmDao _vmDao;
+ @Inject
+ ServiceOfferingDao _offeringDao;
+ @Inject
+ CapacityDao _capacityDao;
+ @Inject
+ ConfigurationDao _configDao;
+
+ @Override
+ protected List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
+
+ List<StoragePool> suitablePools = new ArrayList<StoragePool>();
+
+
+ if (s_logger.isDebugEnabled()) {
+ s_logger.debug("LocalStoragePoolAllocator trying to find storage pool to fit the vm");
+ }
+
+ // data disk and host identified from deploying vm (attach volume case)
+ if (dskCh.getType() == Volume.Type.DATADISK && plan.getHostId() != null) {
+ List<StoragePoolHostVO> hostPools = _poolHostDao.listByHostId(plan.getHostId());
+ for (StoragePoolHostVO hostPool: hostPools) {
+ StoragePoolVO pool = _storagePoolDao.findById(hostPool.getPoolId());
+ if (pool != null && pool.isLocal()) {
+ StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId());
+ if (filter(avoid, pol, dskCh, plan)) {
+ s_logger.debug("Found suitable local storage pool " + pool.getId() + ", adding to list");
+ suitablePools.add(pol);
+ }
+ }
+
+ if (suitablePools.size() == returnUpTo) {
+ break;
+ }
+ }
+ } else {
+ List<StoragePoolVO> availablePools = _storagePoolDao.findLocalStoragePoolsByTags(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId(), dskCh.getTags());
+ for (StoragePoolVO pool : availablePools) {
+ if (suitablePools.size() == returnUpTo) {
+ break;
+ }
+ StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId());
+ if (filter(avoid, pol, dskCh, plan)) {
+ suitablePools.add(pol);
+ }
+ }
+ }
+
+ if (s_logger.isDebugEnabled()) {
+ s_logger.debug("LocalStoragePoolAllocator returning " + suitablePools.size() + " suitable storage pools");
+ }
+
+ return suitablePools;
+ }
+
+ @Override
+ public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
+ super.configure(name, params);
+
+ _storageOverprovisioningFactor = new BigDecimal(1);
+ _extraBytesPerVolume = NumbersUtil.parseLong((String) params.get("extra.bytes.per.volume"), 50 * 1024L * 1024L);
+
+ return true;
+ }
+
+ public LocalStoragePoolAllocator() {
+ }
+}
diff --git a/server/src/com/cloud/storage/allocator/UseLocalForRootAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/UseLocalForRootAllocator.java
similarity index 74%
rename from server/src/com/cloud/storage/allocator/UseLocalForRootAllocator.java
rename to engine/storage/src/org/apache/cloudstack/storage/allocator/UseLocalForRootAllocator.java
index 2c19406fef6..4663b12e97e 100644
--- a/server/src/com/cloud/storage/allocator/UseLocalForRootAllocator.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/UseLocalForRootAllocator.java
@@ -14,7 +14,7 @@
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
-package com.cloud.storage.allocator;
+package org.apache.cloudstack.storage.allocator;
import java.util.List;
import java.util.Map;
@@ -23,23 +23,17 @@ import javax.ejb.Local;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
-import org.springframework.stereotype.Component;
+import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
-import com.cloud.configuration.Config;
-import com.cloud.configuration.dao.ConfigurationDao;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.deploy.DeploymentPlan;
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
-import com.cloud.host.Host;
import com.cloud.storage.StoragePool;
-import com.cloud.storage.Volume.Type;
-
import com.cloud.vm.DiskProfile;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineProfile;
-@Component
@Local(value=StoragePoolAllocator.class)
public class UseLocalForRootAllocator extends LocalStoragePoolAllocator implements StoragePoolAllocator {
@@ -55,29 +49,13 @@ public class UseLocalForRootAllocator extends LocalStoragePoolAllocator implemen
return super.allocateToPool(dskCh, vmProfile, plan, avoid, returnUpTo);
}
-
- @Override
- public String chooseStorageIp(VirtualMachine vm, Host host, Host storage) {
- return null;
- }
@Override
public boolean configure(String name, Map params) throws ConfigurationException {
super.configure(name, params);
return true;
}
-
- @Override
- protected boolean localStorageAllocationNeeded(DiskProfile dskCh) {
- if (dskCh.getType() == Type.ROOT) {
- return true;
- } else if (dskCh.getType() == Type.DATADISK) {
- return false;
- } else {
- return super.localStorageAllocationNeeded(dskCh);
- }
- }
-
+
protected UseLocalForRootAllocator() {
}
}
diff --git a/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java
new file mode 100644
index 00000000000..6f7849737f4
--- /dev/null
+++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java
@@ -0,0 +1,80 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.storage.allocator;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.log4j.Logger;
+import org.springframework.stereotype.Component;
+
+import com.cloud.deploy.DeploymentPlan;
+import com.cloud.deploy.DeploymentPlanner.ExcludeList;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.Volume;
+import com.cloud.storage.dao.StoragePoolDao;
+import com.cloud.vm.DiskProfile;
+import com.cloud.vm.VirtualMachine;
+import com.cloud.vm.VirtualMachineProfile;
+
+@Component
+public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
+ private static final Logger s_logger = Logger.getLogger(ZoneWideStoragePoolAllocator.class);
+ @Inject StoragePoolDao _storagePoolDao;
+ @Inject DataStoreManager dataStoreMgr;
+
+ @Override
+ protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh,
+ DeploymentPlan plan) {
+ Volume volume = _volumeDao.findById(dskCh.getVolumeId());
+ List<Volume> requestVolumes = new ArrayList<Volume>();
+ requestVolumes.add(volume);
+ return storageMgr.storagePoolHasEnoughSpace(requestVolumes, pool);
+ }
+
+ @Override
+ protected List<StoragePool> select(DiskProfile dskCh,
+ VirtualMachineProfile<? extends VirtualMachine> vmProfile,
+ DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
+ List<StoragePool> suitablePools = new ArrayList<StoragePool>();
+ HypervisorType hypervisor = vmProfile.getHypervisorType();
+ if (hypervisor != null) {
+ if (hypervisor != HypervisorType.KVM) {
+ s_logger.debug("Only kvm supports zone wide storage");
+ return suitablePools;
+ }
+ }
+
+ List<StoragePoolVO> storagePools = _storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), dskCh.getTags());
+
+ for (StoragePoolVO storage : storagePools) {
+ if (suitablePools.size() == returnUpTo) {
+ break;
+ }
+ StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(storage.getId());
+ if (filter(avoid, pol, dskCh, plan)) {
+ suitablePools.add(pol);
+ }
+ }
+ return suitablePools;
+ }
+}
diff --git a/engine/storage/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java b/engine/storage/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java
index c067a1b651c..cfd9f400839 100644
--- a/engine/storage/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java
@@ -100,7 +100,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
@Inject
ConfigurationDao configDao;
@Inject
- StorageManager storagMgr;
+ StorageManager storageMgr;
@Inject
VolumeDao volDao;
@Inject
@@ -149,7 +149,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
CopyVolumeAnswer cvAnswer = null;
String errMsg = null;
try {
- cvAnswer = (CopyVolumeAnswer) this.storagMgr.sendToPool(destPool,
+ cvAnswer = (CopyVolumeAnswer) this.storageMgr.sendToPool(destPool,
cvCmd);
} catch (StorageUnavailableException e1) {
s_logger.debug("Failed to copy volume " + srcData.getId() + " to "
@@ -231,7 +231,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
"2.1");
Answer answer = null;
try {
- answer = this.storagMgr.sendToPool(pool, cmd);
+ answer = this.storageMgr.sendToPool(pool, cmd);
} catch (StorageUnavailableException e) {
} finally {
snapshotDao.unlockFromLockTable(snapshotId.toString());
@@ -268,7 +268,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
+ snapshotId
+ " due to this snapshot is being used, try it later ");
}
- answer = (CreateVolumeFromSnapshotAnswer) this.storagMgr
+ answer = (CreateVolumeFromSnapshotAnswer) this.storageMgr
.sendToPool(pool, createVolumeFromSnapshotCommand);
if (answer != null && answer.getResult()) {
vdiUUID = answer.getVdi();
@@ -306,7 +306,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
StoragePool pool = (StoragePool)volume.getDataStore();
String errMsg = null;
try {
- answer = storagMgr.sendToPool(pool, null, cmd);
+ answer = storageMgr.sendToPool(pool, null, cmd);
} catch (StorageUnavailableException e) {
s_logger.debug("Failed to send to storage pool", e);
throw new CloudRuntimeException("Failed to send to storage pool", e);
@@ -358,7 +358,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
_copyvolumewait);
CopyVolumeAnswer cvAnswer;
try {
- cvAnswer = (CopyVolumeAnswer) this.storagMgr.sendToPool(srcPool, cvCmd);
+ cvAnswer = (CopyVolumeAnswer) this.storageMgr.sendToPool(srcPool, cvCmd);
} catch (StorageUnavailableException e1) {
throw new CloudRuntimeException(
"Failed to copy the volume from the source primary storage pool to secondary storage.",
@@ -376,7 +376,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
secondaryStorageVolumePath, destPool,
secondaryStorageURL, false, _copyvolumewait);
try {
- cvAnswer = (CopyVolumeAnswer) this.storagMgr.sendToPool(destPool, cvCmd);
+ cvAnswer = (CopyVolumeAnswer) this.storageMgr.sendToPool(destPool, cvCmd);
} catch (StorageUnavailableException e1) {
throw new CloudRuntimeException(
"Failed to copy the volume from secondary storage to the destination primary storage pool.");
@@ -464,7 +464,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
Long volumeId = snapshot.getVolumeId();
String origTemplateInstallPath = null;
- List pools = this.storagMgr
+ List pools = this.storageMgr
.ListByDataCenterHypervisor(zoneId,
snapshot.getHypervisorType());
if (pools == null || pools.size() == 0) {
@@ -516,7 +516,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
}
Answer answer = null;
try {
- answer = this.storagMgr.sendToPool(pool, cmd);
+ answer = this.storageMgr.sendToPool(pool, cmd);
cmd = null;
} catch (StorageUnavailableException e) {
} finally {
@@ -557,7 +557,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
CreatePrivateTemplateAnswer answer = null;
try {
- answer = (CreatePrivateTemplateAnswer) this.storagMgr.sendToPool(
+ answer = (CreatePrivateTemplateAnswer) this.storageMgr.sendToPool(
pool, cmd);
} catch (StorageUnavailableException e) {
throw new CloudRuntimeException(
diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/AncientPrimaryDataStoreLifeCyclImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/AncientPrimaryDataStoreLifeCycleImpl.java
similarity index 98%
rename from engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/AncientPrimaryDataStoreLifeCyclImpl.java
rename to engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/AncientPrimaryDataStoreLifeCycleImpl.java
index 2167ba19a32..7a5b0d06020 100644
--- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/AncientPrimaryDataStoreLifeCyclImpl.java
+++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/AncientPrimaryDataStoreLifeCycleImpl.java
@@ -56,6 +56,7 @@ import com.cloud.exception.InvalidParameterValueException;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.Status;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.resource.ResourceManager;
import com.cloud.server.ManagementServer;
import com.cloud.storage.OCFS2Manager;
@@ -94,10 +95,10 @@ import com.cloud.vm.dao.SecondaryStorageVmDao;
import com.cloud.vm.dao.UserVmDao;
import com.cloud.vm.dao.VMInstanceDao;
-public class AncientPrimaryDataStoreLifeCyclImpl implements
+public class AncientPrimaryDataStoreLifeCycleImpl implements
PrimaryDataStoreLifeCycle {
private static final Logger s_logger = Logger
- .getLogger(AncientPrimaryDataStoreLifeCyclImpl.class);
+ .getLogger(AncientPrimaryDataStoreLifeCycleImpl.class);
@Inject
protected ResourceManager _resourceMgr;
protected List _discoverers;
@@ -134,9 +135,6 @@ public class AncientPrimaryDataStoreLifeCyclImpl implements
protected StoragePoolHostDao _storagePoolHostDao;
@Inject
protected AlertManager _alertMgr;
-
-
-
@Inject
protected ConsoleProxyDao _consoleProxyDao;
@@ -223,10 +221,6 @@ public class AncientPrimaryDataStoreLifeCyclImpl implements
}
pool = new StoragePoolVO(StoragePoolType.NetworkFilesystem,
storageHost, port, hostPath);
- if (clusterId == null) {
- throw new IllegalArgumentException(
- "NFS need to have clusters specified for XenServers");
- }
} else if (scheme.equalsIgnoreCase("file")) {
if (port == -1) {
port = 0;
@@ -466,7 +460,18 @@ public class AncientPrimaryDataStoreLifeCyclImpl implements
@Override
public boolean attachZone(DataStore dataStore, ZoneScope scope) {
- StoragePoolVO pool = this.primaryDataStoreDao.findById(dataStore.getId());
+ List<HostVO> hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM, scope.getScopeId());
+ for (HostVO host : hosts) {
+ try {
+ this.storageMgr.connectHostToSharedPool(host.getId(),
+ dataStore.getId());
+ } catch (Exception e) {
+ s_logger.warn("Unable to establish a connection between " + host
+ + " and " + dataStore, e);
+ }
+ }
+ StoragePoolVO pool = this.primaryDataStoreDao.findById(dataStore.getId());
+
pool.setScope(ScopeType.ZONE);
pool.setStatus(StoragePoolStatus.Up);
this.primaryDataStoreDao.update(pool.getId(), pool);
diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/AncientPrimaryDataStoreProviderImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/AncientPrimaryDataStoreProviderImpl.java
index e7d65167eac..09e78e45659 100644
--- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/AncientPrimaryDataStoreProviderImpl.java
+++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/AncientPrimaryDataStoreProviderImpl.java
@@ -27,7 +27,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager;
import org.apache.cloudstack.storage.datastore.driver.AncientPrimaryDataStoreDriverImpl;
-import org.apache.cloudstack.storage.datastore.lifecycle.AncientPrimaryDataStoreLifeCyclImpl;
+import org.apache.cloudstack.storage.datastore.lifecycle.AncientPrimaryDataStoreLifeCycleImpl;
import org.springframework.stereotype.Component;
import com.cloud.utils.component.ComponentContext;
@@ -55,7 +55,7 @@ public class AncientPrimaryDataStoreProviderImpl implements
@Override
public boolean configure(Map params) {
- lifecyle = ComponentContext.inject(AncientPrimaryDataStoreLifeCyclImpl.class);
+ lifecyle = ComponentContext.inject(AncientPrimaryDataStoreLifeCycleImpl.class);
driver = ComponentContext.inject(AncientPrimaryDataStoreDriverImpl.class);
uuid = (String)params.get("uuid");
id = (Long)params.get("id");
diff --git a/framework/events/src/org/apache/cloudstack/framework/events/EventBus.java b/framework/events/src/org/apache/cloudstack/framework/events/EventBus.java
index c16ee6f96f4..b83e3b28a7a 100644
--- a/framework/events/src/org/apache/cloudstack/framework/events/EventBus.java
+++ b/framework/events/src/org/apache/cloudstack/framework/events/EventBus.java
@@ -19,15 +19,13 @@
package org.apache.cloudstack.framework.events;
-import com.cloud.utils.component.Adapter;
-
import java.util.UUID;
/**
* Interface to publish and subscribe to CloudStack events
*
*/
-public interface EventBus extends Adapter{
+public interface EventBus {
/**
* publish an event on to the event bus
diff --git a/framework/rest/pom.xml b/framework/rest/pom.xml
index 2a22155603a..a783bc8de2f 100644
--- a/framework/rest/pom.xml
+++ b/framework/rest/pom.xml
@@ -67,6 +67,10 @@
org.eclipse.jetty
jetty-server
+
+ org.apache.geronimo.specs
+ geronimo-servlet_3.0_spec
+
diff --git a/packaging/centos63/cloud.spec b/packaging/centos63/cloud.spec
index 0c5e615593e..e345679addf 100644
--- a/packaging/centos63/cloud.spec
+++ b/packaging/centos63/cloud.spec
@@ -317,7 +317,7 @@ fi
%post awsapi
if [ -d "%{_datadir}/%{name}-management" ] ; then
- ln %{_datadir}/%{name}-bridge/webapps %{_datadir}/%{name}-management/webapps7080
+ ln -s %{_datadir}/%{name}-bridge/webapps %{_datadir}/%{name}-management/webapps7080
fi
#No default permission as the permission setup is complex
diff --git a/packaging/debian/init/cloud-agent b/packaging/debian/init/cloud-agent
new file mode 100755
index 00000000000..8b646935e8f
--- /dev/null
+++ b/packaging/debian/init/cloud-agent
@@ -0,0 +1,167 @@
+#!/bin/bash
+
+### BEGIN INIT INFO
+# Provides: cloud agent
+# Required-Start: $network $local_fs
+# Required-Stop: $network $local_fs
+# Default-Start: 3 4 5
+# Default-Stop: 0 1 2 6
+# Short-Description: Start/stop Apache CloudStack Agent
+# Description: This script starts/stops the Apache CloudStack agent
+## The CloudStack Agent is a part of the Apache CloudStack project and is used
+## for managing KVM-based Hypervisors and performing secondary storage tasks inside
+## the Secondary Storage System Virtual Machine.
+## JSVC (Java daemonizing) is used for starting and stopping the agent
+### END INIT INFO
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+. /lib/lsb/init-functions
+
+SHORTNAME="cloud-agent"
+PIDFILE=/var/run/"$SHORTNAME".pid
+LOCKFILE=/var/lock/subsys/"$SHORTNAME"
+PROGNAME="CloudStack Agent"
+CLASS="com.cloud.agent.AgentShell"
+PROG="jsvc"
+DAEMON="/usr/bin/jsvc"
+SHUTDOWN_WAIT="30"
+
+unset OPTIONS
+[ -r /etc/default/"$SHORTNAME" ] && source /etc/default/"$SHORTNAME"
+
+# The first existing directory is used for JAVA_HOME (if JAVA_HOME is not defined in $DEFAULT)
+JDK_DIRS="/usr/lib/jvm/java-6-openjdk /usr/lib/jvm/java-6-openjdk-i386 /usr/lib/jvm/java-6-openjdk-amd64 /usr/lib/jvm/java-6-sun /usr/lib/jvm/java-1.5.0-sun /usr/lib/j2sdk1.5-sun /usr/lib/j2sdk1.5-ibm"
+
+for jdir in $JDK_DIRS; do
+ if [ -r "$jdir/bin/java" -a -z "${JAVA_HOME}" ]; then
+ JAVA_HOME="$jdir"
+ fi
+done
+export JAVA_HOME
+
+# We need to append the JSVC daemon JAR to the classpath
+# AgentShell implements the JSVC daemon methods
+export CLASSPATH="/usr/share/java/commons-daemon.jar:/usr/share/java/cloud-agent-kvm.jar:/etc/cloud/agent"
+
+wait_for_network() {
+ i=1
+ while [ $i -lt 10 ]
+ do
+ # Under Ubuntu and Debian, libvirt creates a bridge called virbr0 by default.
+ # That's why we want more than two lines back from brctl, so that we know a manually created bridge exists
+ if [ "$(brctl show|wc -l)" -gt 2 ]; then
+ break
+ else
+ sleep 1
+ let i=$i+1
+ continue
+ fi
+ done
+}
+
+start() {
+ if [ -s "$PIDFILE" ] && kill -0 $(cat "$PIDFILE") >/dev/null 2>&1; then
+ log_daemon_msg "$PROGNAME apparently already running"
+ log_end_msg 0
+ exit 0
+ fi
+
+ log_daemon_msg "Starting $PROGNAME" "$SHORTNAME"
+ if hostname --fqdn >/dev/null 2>&1 ; then
+ true
+ else
+ log_failure_msg "The host name does not resolve properly to an IP address. Cannot start $PROGNAME"
+ log_end_msg 1
+ exit 1
+ fi
+
+ wait_for_network
+
+ if start_daemon -p $PIDFILE $DAEMON -cp "$CLASSPATH" -pidfile "$PIDFILE" -errfile SYSLOG -D jna.nosys=true $CLASS
+ RETVAL=$?
+ then
+ rc=0
+ sleep 1
+ if ! kill -0 $(cat "$PIDFILE") >/dev/null 2>&1; then
+ log_failure_msg "$PROG failed to start"
+ rc=1
+ fi
+ else
+ rc=1
+ fi
+
+ if [ $rc -eq 0 ]; then
+ log_end_msg 0
+ else
+ log_end_msg 1
+ rm -f "$PIDFILE"
+ fi
+}
+
+stop() {
+ count="0"
+
+ log_daemon_msg "Stopping $PROGNAME" "$SHORTNAME"
+ killproc -p $PIDFILE $DAEMON
+
+ until [ "$count" -gt "$SHUTDOWN_WAIT" ]
+ do
+ agentPid=$(ps aux|grep [j]svc|grep $SHORTNAME)
+ if [ "$?" -gt "0" ];then
+ break
+ fi
+ sleep 1
+ let count="${count}+1"
+ done
+
+ agentPid=$(ps aux|grep [j]svc|grep $SHORTNAME)
+ if [ "$?" -eq "0" ]; then
+ agentPid=$(ps aux|grep [j]svc|awk '{print $2}')
+ if [ "$agentPid" != "" ]; then
+ log_warning_msg "$PROG still running, forcing kill"
+ kill -9 $agentPid
+ fi
+ fi
+
+ log_end_msg $?
+ rm -f "$PIDFILE"
+}
+
+case "$1" in
+ start)
+ start
+ ;;
+ stop)
+ stop
+ ;;
+ status)
+ status_of_proc -p "$PIDFILE" "$PROG" "$SHORTNAME"
+ RETVAL=$?
+ ;;
+ restart | force-reload)
+ stop
+ sleep 3
+ start
+ ;;
+ *)
+ echo "Usage: $0 {start|stop|restart|force-reload|status}"
+ RETVAL=3
+esac
+
+exit $RETVAL
diff --git a/packaging/debian/init/cloud-management b/packaging/debian/init/cloud-management
new file mode 100755
index 00000000000..490bf1e8e68
--- /dev/null
+++ b/packaging/debian/init/cloud-management
@@ -0,0 +1,244 @@
+#!/bin/sh
+#
+# /etc/init.d/cloud-management -- startup script for the Tomcat 6 servlet engine that runs the CloudStack Management Server
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+### BEGIN INIT INFO
+# Provides: tomcat-vmops
+# Required-Start: $local_fs $remote_fs $network
+# Required-Stop: $local_fs $remote_fs $network
+# Should-Start: $named
+# Should-Stop: $named
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Start Tomcat (CloudStack).
+# Description: Start the Tomcat servlet engine that runs the CloudStack Management Server.
+### END INIT INFO
+
+PATH=/bin:/usr/bin:/sbin:/usr/sbin
+NAME=cloud-management
+DESC="CloudStack-specific Tomcat servlet engine"
+DAEMON=/usr/bin/jsvc
+CATALINA_HOME=/usr/share/cloud/management
+DEFAULT=/etc/cloud/management/tomcat6.conf
+JVM_TMP=/tmp/$NAME-temp
+
+# We have to explicitly set the HOME variable to the homedir from the user "cloud"
+# This is because various scripts run by the management server read the HOME variable
+# and fail when this init script is run manually.
+HOME=$(echo ~cloud)
+
+if [ `id -u` -ne 0 ]; then
+ echo "You need root privileges to run this script"
+ exit 1
+fi
+
+# Make sure tomcat is started with system locale
+if [ -r /etc/default/locale ]; then
+ . /etc/default/locale
+ export LANG
+fi
+
+. /lib/lsb/init-functions
+. /etc/default/rcS
+
+
+# The following variables can be overwritten in $DEFAULT
+
+# Run Tomcat 6 as this user ID
+TOMCAT6_USER=tomcat6
+
+# The first existing directory is used for JAVA_HOME (if JAVA_HOME is not
+# defined in $DEFAULT)
+JDK_DIRS="/usr/lib/jvm/java-1.6.0-openjdk-amd64/ /usr/lib/jvm/java-1.6.0-openjdk-i386/ /usr/lib/jvm/java-1.6.0-openjdk/ /usr/lib/jvm/java-6-openjdk /usr/lib/jvm/java-6-sun"
+
+# Look for the right JVM to use
+for jdir in $JDK_DIRS; do
+ if [ -r "$jdir/bin/java" -a -z "${JAVA_HOME}" ]; then
+ JAVA_HOME="$jdir"
+ fi
+done
+export JAVA_HOME
+
+# Directory for per-instance configuration files and webapps
+CATALINA_BASE=/usr/share/cloud/management
+
+# Use the Java security manager? (yes/no)
+TOMCAT6_SECURITY=no
+
+# Default Java options
+# Set java.awt.headless=true if JAVA_OPTS is not set so the
+# Xalan XSL transformer can work without X11 display on JDK 1.4+
+# It also looks like the default heap size of 64M is not enough for most cases
+# so the maximum heap size is set to 128M
+if [ -z "$JAVA_OPTS" ]; then
+ JAVA_OPTS="-Djava.awt.headless=true -Xmx128M"
+fi
+
+# End of variables that can be overwritten in $DEFAULT
+
+# overwrite settings from default file
+if [ -f "$DEFAULT" ]; then
+ . "$DEFAULT"
+fi
+
+if [ ! -f "$CATALINA_HOME/bin/bootstrap.jar" ]; then
+ log_failure_msg "$NAME is not installed"
+ exit 1
+fi
+
+[ -f "$DAEMON" ] || exit 0
+
+POLICY_CACHE="$CATALINA_BASE/work/catalina.policy"
+
+JAVA_OPTS="$JAVA_OPTS -Djava.endorsed.dirs=$CATALINA_HOME/endorsed -Dcatalina.base=$CATALINA_BASE -Dcatalina.home=$CATALINA_HOME -Djava.io.tmpdir=$JVM_TMP"
+
+# Set the JSP compiler if set in the tomcat6.default file
+if [ -n "$JSP_COMPILER" ]; then
+ JAVA_OPTS="$JAVA_OPTS -Dbuild.compiler=$JSP_COMPILER"
+fi
+
+if [ "$TOMCAT6_SECURITY" = "yes" ]; then
+ JAVA_OPTS="$JAVA_OPTS -Djava.security.manager -Djava.security.policy=$POLICY_CACHE"
+fi
+
+# Set juli LogManager if logging.properties is provided
+if [ -r "$CATALINA_BASE"/conf/logging.properties ]; then
+ JAVA_OPTS="$JAVA_OPTS "-Djava.util.logging.manager=org.apache.juli.ClassLoaderLogManager" "-Djava.util.logging.config.file="$CATALINA_BASE/conf/logging.properties"
+fi
+
+# Define other required variables
+CATALINA_PID="/var/run/$NAME.pid"
+BOOTSTRAP_CLASS=org.apache.catalina.startup.Bootstrap
+JSVC_CLASSPATH="/usr/share/java/commons-daemon.jar:$CATALINA_HOME/bin/bootstrap.jar"
+JSVC_CLASSPATH=$CLASSPATH:$JSVC_CLASSPATH
+
+# Look for Java Secure Sockets Extension (JSSE) JARs
+if [ -z "${JSSE_HOME}" -a -r "${JAVA_HOME}/jre/lib/jsse.jar" ]; then
+ JSSE_HOME="${JAVA_HOME}/jre/"
+fi
+export JSSE_HOME
+
+case "$1" in
+ start)
+ if [ -z "$JAVA_HOME" ]; then
+ log_failure_msg "no JDK found - please set JAVA_HOME"
+ exit 1
+ fi
+
+ if [ ! -d "$CATALINA_BASE/conf" ]; then
+ log_failure_msg "invalid CATALINA_BASE: $CATALINA_BASE"
+ exit 1
+ fi
+
+ log_daemon_msg "Starting $DESC" "$NAME"
+ if start-stop-daemon --test --start --pidfile "$CATALINA_PID" \
+ --user $TOMCAT6_USER --startas "$JAVA_HOME/bin/java" \
+ >/dev/null; then
+
+ # Regenerate POLICY_CACHE file
+ umask 022
+ echo "// AUTO-GENERATED FILE from /etc/tomcat6/policy.d/" \
+ > "$POLICY_CACHE"
+ echo "" >> "$POLICY_CACHE"
+ if ls $CATALINA_BASE/conf/policy.d/*.policy > /dev/null 2>&1 ; then
+ cat $CATALINA_BASE/conf/policy.d/*.policy \
+ >> "$POLICY_CACHE"
+ fi
+
+ # Remove / recreate JVM_TMP directory
+ rm -rf "$JVM_TMP"
+ mkdir "$JVM_TMP" || {
+ log_failure_msg "could not create JVM temporary directory"
+ exit 1
+ }
+ chown $TOMCAT6_USER "$JVM_TMP"
+ cd "$JVM_TMP"
+
+
+ # fix storage issues on nfs mounts
+ umask 000
+ $DAEMON -user "$TOMCAT6_USER" -cp "$JSVC_CLASSPATH" \
+ -outfile SYSLOG -errfile SYSLOG \
+ -pidfile "$CATALINA_PID" $JAVA_OPTS "$BOOTSTRAP_CLASS"
+
+ sleep 5
+ if start-stop-daemon --test --start --pidfile "$CATALINA_PID" \
+ --user $TOMCAT6_USER --startas "$JAVA_HOME/bin/java" \
+ >/dev/null; then
+ log_end_msg 1
+ else
+ log_end_msg 0
+ fi
+ else
+ log_progress_msg "(already running)"
+ log_end_msg 0
+ fi
+ ;;
+ stop)
+ log_daemon_msg "Stopping $DESC" "$NAME"
+ if start-stop-daemon --test --start --pidfile "$CATALINA_PID" \
+ --user "$TOMCAT6_USER" --startas "$JAVA_HOME/bin/java" \
+ >/dev/null; then
+ log_progress_msg "(not running)"
+ else
+ $DAEMON -cp "$JSVC_CLASSPATH" -pidfile "$CATALINA_PID" \
+ -stop "$BOOTSTRAP_CLASS"
+ fi
+ rm -rf "$JVM_TMP"
+ log_end_msg 0
+ ;;
+ status)
+ if start-stop-daemon --test --start --pidfile "$CATALINA_PID" \
+ --user $TOMCAT6_USER --startas "$JAVA_HOME/bin/java" \
+ >/dev/null; then
+
+ if [ -f "$CATALINA_PID" ]; then
+ log_success_msg "$DESC is not running, but pid file exists."
+ exit 1
+ else
+ log_success_msg "$DESC is not running."
+ exit 3
+ fi
+ else
+ log_success_msg "$DESC is running with pid `cat $CATALINA_PID`"
+ fi
+ ;;
+ restart|force-reload)
+ if start-stop-daemon --test --stop --pidfile "$CATALINA_PID" \
+ --user $TOMCAT6_USER --startas "$JAVA_HOME/bin/java" \
+ >/dev/null; then
+ $0 stop
+ sleep 1
+ fi
+ $0 start
+ ;;
+ try-restart)
+ if start-stop-daemon --test --start --pidfile "$CATALINA_PID" \
+ --user $TOMCAT6_USER --startas "$JAVA_HOME/bin/java" \
+ >/dev/null; then
+ $0 start
+ fi
+ ;;
+ *)
+ log_success_msg "Usage: $0 {start|stop|restart|try-restart|force-reload|status}"
+ exit 1
+ ;;
+esac
+
+exit 0
diff --git a/packaging/debian/init/cloud-usage b/packaging/debian/init/cloud-usage
new file mode 100755
index 00000000000..6b17ea82977
--- /dev/null
+++ b/packaging/debian/init/cloud-usage
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+### BEGIN INIT INFO
+# Provides: cloud usage
+# Required-Start: $network $local_fs
+# Required-Stop: $network $local_fs
+# Default-Start: 3 4 5
+# Default-Stop: 0 1 2 6
+# Short-Description: Start/stop Apache CloudStack Usage Monitor
+# Description: This script starts/stops the Apache CloudStack Usage Monitor
+## The CloudStack Usage Monitor is a part of the Apache CloudStack project and is used
+## for storing usage statistics from instances.
+## JSVC (Java daemonizing) is used for starting and stopping the usage monitor.
+### END INIT INFO
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+. /lib/lsb/init-functions
+
+SHORTNAME="cloud-usage"
+PIDFILE=/var/run/"$SHORTNAME".pid
+LOGFILE=/var/log/cloud/usage/usage-server.log
+PROGNAME="CloudStack Usage Monitor"
+CLASS="com.cloud.usage.UsageServer"
+PROG="jsvc"
+DAEMON="/usr/bin/jsvc"
+USER=@MSUSER@
+
+unset OPTIONS
+[ -r @SYSCONFDIR@/default/"$SHORTNAME" ] && source @SYSCONFDIR@/default/"$SHORTNAME"
+
+# The first existing directory is used for JAVA_HOME (if JAVA_HOME is not defined in $DEFAULT)
+JDK_DIRS="/usr/lib/jvm/java-6-openjdk /usr/lib/jvm/java-6-openjdk-i386 /usr/lib/jvm/java-6-openjdk-amd64 /usr/lib/jvm/java-6-sun /usr/lib/jvm/jre-1.6.0 /usr/lib/j2sdk1.5-sun /usr/lib/jre-openjdk"
+
+for jdir in $JDK_DIRS; do
+ if [ -r "$jdir/bin/java" -a -z "${JAVA_HOME}" ]; then
+ JAVA_HOME="$jdir"
+ fi
+done
+export JAVA_HOME
+
+SCP="@SYSTEMCLASSPATH@"
+DCP="@DEPSCLASSPATH@"
+UCP="@USAGECLASSPATH@"
+JCP="/usr/share/java/commons-daemon.jar"
+
+# We need to append the JSVC daemon JAR to the classpath
+# The UsageServer class implements the JSVC daemon methods
+export CLASSPATH="$SCP:$DCP:$UCP:$JCP:@USAGESYSCONFDIR@"
+
+start() {
+ if [ -s "$PIDFILE" ] && kill -0 $(cat "$PIDFILE") >/dev/null 2>&1; then
+ log_daemon_msg "$PROGNAME apparently already running"
+ log_end_msg 0
+ exit 0
+ fi
+
+ log_daemon_msg "Starting $PROGNAME" "$SHORTNAME"
+ if hostname --fqdn >/dev/null 2>&1 ; then
+ true
+ else
+ log_failure_msg "The host name does not resolve properly to an IP address. Cannot start $PROGNAME"
+ log_end_msg 1
+ exit 1
+ fi
+
+ if start_daemon -p $PIDFILE $DAEMON -cp "$CLASSPATH" -pidfile "$PIDFILE" -user "$USER" -outfile SYSLOG -errfile SYSLOG -Dpid=$$ $CLASS
+ then
+ rc=0
+ sleep 1
+ if ! kill -0 $(cat "$PIDFILE") >/dev/null 2>&1; then
+ log_failure_msg "$PROG failed to start"
+ rc=1
+ fi
+ else
+ rc=1
+ fi
+
+ if [ $rc -eq 0 ]; then
+ log_end_msg 0
+ else
+ log_end_msg 1
+ rm -f "$PIDFILE"
+ fi
+}
+
+stop() {
+ log_daemon_msg "Stopping $PROGNAME" "$SHORTNAME"
+ killproc -p $PIDFILE $DAEMON
+ log_end_msg $?
+ rm -f "$PIDFILE"
+}
+
+RETVAL=0
+
+case "$1" in
+ start)
+ start
+ ;;
+ stop)
+ stop
+ ;;
+ status)
+ status_of_proc -p "$PIDFILE" "$PROG" "$SHORTNAME"
+ RETVAL=$?
+ ;;
+ restart | force-reload)
+ stop
+ sleep 3
+ start
+ ;;
+ *)
+ echo "Usage: $0 {start|stop|restart|force-reload|status}"
+ RETVAL=3
+esac
+
+exit $RETVAL
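Both init scripts above launch the Java process through jsvc (commons-daemon). For context, a class started this way is expected to implement the commons-daemon lifecycle; the sketch below is illustrative only, the class name and method bodies are not part of this patch, and it assumes only the stock org.apache.commons.daemon API shipped in the commons-daemon.jar referenced above.

    import org.apache.commons.daemon.Daemon;
    import org.apache.commons.daemon.DaemonContext;

    // Illustrative sketch of the lifecycle jsvc drives; the real
    // com.cloud.usage.UsageServer entry points are not shown in this patch.
    public class UsageDaemonSketch implements Daemon {
        @Override
        public void init(DaemonContext context) throws Exception {
            // read context.getArguments(), load configuration
        }

        @Override
        public void start() throws Exception {
            // start the usage aggregation threads
        }

        @Override
        public void stop() throws Exception {
            // stop background work so 'jsvc -stop' returns promptly
        }

        @Override
        public void destroy() {
            // release remaining resources
        }
    }
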
diff --git a/packaging/debian/replace.properties b/packaging/debian/replace.properties
index 6520f63e682..d891e795361 100644
--- a/packaging/debian/replace.properties
+++ b/packaging/debian/replace.properties
@@ -37,12 +37,12 @@ CONFIGUREVARS=
DEPSCLASSPATH=
DOCDIR=
IPALOCATORLOG=/var/log/cloud/management/ipallocator.log
-JAVADIR=/usr/share/java
+JAVADIR=/usr/share/cloudstack-management/webapps/client/WEB-INF/lib
LIBEXECDIR=/usr/libexec
LOCKDIR=/var/lock
MSCLASSPATH=
MSCONF=/etc/cloud/management
-MSENVIRON=/usr/share/cloud/management
+MSENVIRON=/usr/share/cloudstack-management
MSLOG=/var/log/cloud/management/management-server.log
MSLOGDIR=/var/log/cloud/management/
MSMNTDIR=/var/lib/cloud/mnt
@@ -52,7 +52,7 @@ PLUGINJAVADIR=
PREMIUMJAVADIR=
PYTHONDIR=/usr/lib/python2.6/site-packages/
SERVERSYSCONFDIR=/etc/cloud/server
-SETUPDATADIR=/usr/share/cloud/setup
+SETUPDATADIR=/usr/share/cloudstack-management/setup
SYSCONFDIR=/etc
SYSTEMCLASSPATH=
SYSTEMJARS=
diff --git a/plugins/event-bus/rabbitmq/src/org/apache/cloudstack/mom/rabbitmq/RabbitMQEventBus.java b/plugins/event-bus/rabbitmq/src/org/apache/cloudstack/mom/rabbitmq/RabbitMQEventBus.java
index ce0930d115d..1c0c6bef6f2 100644
--- a/plugins/event-bus/rabbitmq/src/org/apache/cloudstack/mom/rabbitmq/RabbitMQEventBus.java
+++ b/plugins/event-bus/rabbitmq/src/org/apache/cloudstack/mom/rabbitmq/RabbitMQEventBus.java
@@ -40,13 +40,17 @@ import java.util.concurrent.Executors;
public class RabbitMQEventBus extends ManagerBase implements EventBus {
// details of AMQP server
- private static String _amqpHost;
- private static Integer _port;
- private static String _username;
- private static String _password;
+ private static String amqpHost;
+ private static Integer port;
+ private static String username;
+ private static String password;
// AMQP exchange name where all CloudStack events will be published
- private static String _amqpExchangeName;
+ private static String amqpExchangeName;
+
+ private String name;
+
+ private static Integer retryInterval;
// hashmap to book keep the registered subscribers
private static ConcurrentHashMap> _subscribers;
@@ -58,59 +62,76 @@ public class RabbitMQEventBus extends ManagerBase implements EventBus {
private static boolean _autoAck = true;
private ExecutorService executorService;
- private String _name;
private static DisconnectHandler disconnectHandler;
- private static Integer _retryInterval;
private static final Logger s_logger = Logger.getLogger(RabbitMQEventBus.class);
@Override
public boolean configure(String name, Map params) throws ConfigurationException {
- _amqpHost = (String) params.get("server");
- if (_amqpHost == null || _amqpHost.isEmpty()) {
- throw new ConfigurationException("Unable to get the AMQP server details");
- }
-
- _username = (String) params.get("username");
- if (_username == null || _username.isEmpty()) {
- throw new ConfigurationException("Unable to get the username details");
- }
-
- _password = (String) params.get("password");
- if (_password == null || _password.isEmpty()) {
- throw new ConfigurationException("Unable to get the password details");
- }
-
- _amqpExchangeName = (String) params.get("exchangename");
- if (_amqpExchangeName == null || _amqpExchangeName.isEmpty()) {
- throw new ConfigurationException("Unable to get the _exchange details on the AMQP server");
- }
-
try {
- String portStr = (String) params.get("port");
- if (portStr == null || portStr.isEmpty()) {
+ if (amqpHost == null || amqpHost.isEmpty()) {
+ throw new ConfigurationException("Unable to get the AMQP server details");
+ }
+
+ if (username == null || username.isEmpty()) {
+ throw new ConfigurationException("Unable to get the username details");
+ }
+
+ if (password == null || password.isEmpty()) {
+ throw new ConfigurationException("Unable to get the password details");
+ }
+
+ if (amqpExchangeName == null || amqpExchangeName.isEmpty()) {
+ throw new ConfigurationException("Unable to get the _exchange details on the AMQP server");
+ }
+
+ if (port == null) {
throw new ConfigurationException("Unable to get the port details of AMQP server");
}
- _port = Integer.parseInt(portStr);
- String retryIntervalStr = (String) params.get("retryinterval");
- if (retryIntervalStr == null || retryIntervalStr.isEmpty()) {
- // default to 10s to try out reconnect
- retryIntervalStr = "10000";
+ if (retryInterval == null) {
+ retryInterval = 10000;// default to 10s to try out reconnect
}
- _retryInterval = Integer.parseInt(retryIntervalStr);
+
} catch (NumberFormatException e) {
throw new ConfigurationException("Invalid port number/retry interval");
}
_subscribers = new ConcurrentHashMap>();
-
executorService = Executors.newCachedThreadPool();
disconnectHandler = new DisconnectHandler();
- _name = name;
+
return true;
}
+ public void setServer(String amqpHost) {
+ this.amqpHost = amqpHost;
+ }
+
+ public void setUsername(String username) {
+ this.username = username;
+ }
+
+ public void setPassword(String password) {
+ this.password = password;
+ }
+
+ public void setPort(Integer port) {
+ this.port = port;
+ }
+
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ public void setExchange(String exchange) {
+ this.amqpExchangeName = exchange;
+ }
+
+ public void setRetryInterval(Integer retryInterval) {
+ this.retryInterval = retryInterval;
+ }
+
/** Call to subscribe to interested set of events
*
* @param topic defines category and type of the events being subscribed to
@@ -141,9 +162,9 @@ public class RabbitMQEventBus extends ManagerBase implements EventBus {
Channel channel = createChannel(connection);
// create a queue and bind it to the exchange with binding key formed from event topic
- createExchange(channel, _amqpExchangeName);
+ createExchange(channel, amqpExchangeName);
channel.queueDeclare(queueName, false, false, false, null);
- channel.queueBind(queueName, _amqpExchangeName, bindingKey);
+ channel.queueBind(queueName, amqpExchangeName, bindingKey);
// register a callback handler to receive the events that a subscriber subscribed to
channel.basicConsume(queueName, _autoAck, queueName,
@@ -216,8 +237,8 @@ public class RabbitMQEventBus extends ManagerBase implements EventBus {
try {
Connection connection = getConnection();
Channel channel = createChannel(connection);
- createExchange(channel, _amqpExchangeName);
- publishEventToExchange(channel, _amqpExchangeName, routingKey, eventDescription);
+ createExchange(channel, amqpExchangeName);
+ publishEventToExchange(channel, amqpExchangeName, routingKey, eventDescription);
channel.close();
} catch (AlreadyClosedException e) {
closeConnection();
@@ -315,11 +336,11 @@ public class RabbitMQEventBus extends ManagerBase implements EventBus {
private synchronized Connection createConnection() throws Exception {
try {
ConnectionFactory factory = new ConnectionFactory();
- factory.setUsername(_username);
- factory.setPassword(_password);
+ factory.setUsername(username);
+ factory.setPassword(password);
factory.setVirtualHost("/");
- factory.setHost(_amqpHost);
- factory.setPort(_port);
+ factory.setHost(amqpHost);
+ factory.setPort(port);
Connection connection = factory.newConnection();
connection.addShutdownListener(disconnectHandler);
_connection = connection;
@@ -481,7 +502,7 @@ public class RabbitMQEventBus extends ManagerBase implements EventBus {
while (!connected) {
try {
- Thread.sleep(_retryInterval);
+ Thread.sleep(retryInterval);
} catch (InterruptedException ie) {
// ignore timer interrupts
}
@@ -504,9 +525,9 @@ public class RabbitMQEventBus extends ManagerBase implements EventBus {
* with binding key formed from event topic
*/
Channel channel = createChannel(connection);
- createExchange(channel, _amqpExchangeName);
+ createExchange(channel, amqpExchangeName);
channel.queueDeclare(subscriberId, false, false, false, null);
- channel.queueBind(subscriberId, _amqpExchangeName, bindingKey);
+ channel.queueBind(subscriberId, amqpExchangeName, bindingKey);
// register a callback handler to receive the events that a subscriber subscribed to
channel.basicConsume(subscriberId, _autoAck, subscriberId,
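The change above moves the AMQP connection settings (server, port, username, password, exchange, retry interval) from the configure() params map to bean setters, so they can be injected by the Spring context before configure() validates them. A minimal wiring sketch follows; the hostname, credentials and exchange are placeholders, and it assumes the checked exception is javax.naming.ConfigurationException as elsewhere in the codebase. In a real deployment the setters are driven by the component context rather than called by hand.

    import java.util.HashMap;
    import javax.naming.ConfigurationException;

    import org.apache.cloudstack.mom.rabbitmq.RabbitMQEventBus;

    public class EventBusWiringSketch {
        public static void main(String[] args) throws ConfigurationException {
            RabbitMQEventBus bus = new RabbitMQEventBus();
            // placeholder connection values, for illustration only
            bus.setServer("rabbit.example.com");
            bus.setPort(5672);
            bus.setUsername("cloudstack");
            bus.setPassword("secret");
            bus.setExchange("cloudstack-events");
            bus.setRetryInterval(10000);
            // configure() now only checks that the injected values are present
            bus.configure("RabbitMQEventBus", new HashMap<String, Object>());
        }
    }
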
diff --git a/plugins/hypervisors/kvm/pom.xml b/plugins/hypervisors/kvm/pom.xml
index 579244014f9..4c3bc0c970f 100644
--- a/plugins/hypervisors/kvm/pom.xml
+++ b/plugins/hypervisors/kvm/pom.xml
@@ -43,6 +43,11 @@
libvirt
0.4.9
+
+ net.java.dev.jna
+ jna
+ 3.5.1
+
install
@@ -50,68 +55,23 @@
test
- maven-assembly-plugin
- 2.3
-
- kvm-agent
- false
-
- agent-descriptor.xml
-
-
+ org.apache.maven.plugins
+ maven-dependency-plugin
+ 2.5.1
- make-agent
+ copy-dependencies
package
- single
+ copy-dependencies
+
+ ${project.build.directory}/dependencies
+ runtime
+
-
- maven-resources-plugin
- 2.6
-
-
- copy-resources
-
- package
-
- copy-resources
-
-
- dist
-
-
- target
-
- kvm-agent.zip
-
-
-
-
-
-
-
-
- org.apache.maven.plugins
- maven-dependency-plugin
- 2.5.1
-
-
- copy-dependencies
- package
-
- copy-dependencies
-
-
- target/dependencies
- runtime
-
-
-
-
diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
index 99b8723c26e..805de408996 100755
--- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
+++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
@@ -2934,7 +2934,21 @@ ServerResource {
vm.addComp(grd);
CpuTuneDef ctd = new CpuTuneDef();
- ctd.setShares(vmTO.getCpus() * vmTO.getMinSpeed());
+ /**
+ A 4.0.x/4.1.x management server does not send minSpeed/maxSpeed in the
+ JSON command; it only sends the legacy 'speed' field.
+
+ So if getMinSpeed() returns null we fall back to getSpeed().
+
+ This way a >4.1 agent can still communicate with a <=4.1 management server.
+
+ This change is due to the overcommit feature introduced in 4.2.
+ */
+ if (vmTO.getMinSpeed() != null) {
+ ctd.setShares(vmTO.getCpus() * vmTO.getMinSpeed());
+ } else {
+ ctd.setShares(vmTO.getCpus() * vmTO.getSpeed());
+ }
vm.addComp(ctd);
FeaturesDef features = new FeaturesDef();
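The fallback above keeps newer agents compatible with vmSpecs sent by 4.0.x/4.1.x management servers. A minimal sketch of the same resolution in isolation, using only VirtualMachineTO getters visible in this patch (getCpus, getMinSpeed, getSpeed); this helper is purely illustrative and is not part of the patch.

    import com.cloud.agent.api.to.VirtualMachineTO;

    public class CpuSharesSketch {
        // Prefer minSpeed (overcommit-aware, >=4.2 management server) and
        // fall back to the legacy 'speed' field (4.0.x/4.1.x).
        static int cpuShares(VirtualMachineTO vmTO) {
            Integer minSpeed = vmTO.getMinSpeed();
            int perCpu = (minSpeed != null) ? minSpeed : vmTO.getSpeed();
            return vmTO.getCpus() * perCpu;
        }
    }
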
diff --git a/plugins/hypervisors/kvm/test/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java b/plugins/hypervisors/kvm/test/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java
new file mode 100644
index 00000000000..018f2f5330e
--- /dev/null
+++ b/plugins/hypervisors/kvm/test/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java
@@ -0,0 +1,184 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.cloud.hypervisor.kvm.resource;
+
+import org.junit.Test;
+import com.cloud.agent.api.to.VirtualMachineTO;
+import com.cloud.hypervisor.kvm.resource.LibvirtVMDef;
+import com.cloud.template.VirtualMachineTemplate.BootloaderType;
+import com.cloud.vm.VirtualMachine;
+import com.cloud.vm.VirtualMachine.Type;
+import java.util.Random;
+import static org.junit.Assert.assertEquals;
+
+public class LibvirtComputingResourceTest {
+
+ String _hyperVisorType = "kvm";
+ Random _random = new Random();
+
+ /**
+ This test verifies that the Agent can handle a vmSpec coming
+ from a <=4.1 management server.
+
+ The overcommit feature is not present there, so only 'speed' is set.
+ */
+ @Test
+ public void testCreateVMFromSpecLegacy() {
+ int id = _random.nextInt(65534);
+ String name = "test-instance-1";
+
+ int cpus = _random.nextInt(7) + 1;
+ int speed = 1024;
+ int minRam = 256 * 1024;
+ int maxRam = 512 * 1024;
+
+ String os = "Ubuntu";
+ boolean haEnabled = false;
+ boolean limitCpuUse = false;
+
+ String vncAddr = "1.2.3.4";
+ String vncPassword = "mySuperSecretPassword";
+
+ LibvirtComputingResource lcr = new LibvirtComputingResource();
+ VirtualMachineTO to = new VirtualMachineTO(id, name, VirtualMachine.Type.User, cpus, speed, minRam, maxRam, BootloaderType.HVM, os, haEnabled, limitCpuUse, vncPassword);
+ to.setVncAddr(vncAddr);
+
+ LibvirtVMDef vm = lcr.createVMFromSpec(to);
+ vm.setHvsType(_hyperVisorType);
+
+ String vmStr = "\n";
+ vmStr += "" + name + "\n";
+ vmStr += "b0f0a72d-7efb-3cad-a8ff-70ebf30b3af9\n";
+ vmStr += "" + os + "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "" + maxRam / 1024 + "\n";
+ vmStr += "" + minRam / 1024 + "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "" + cpus + "\n";
+ vmStr += "\n";
+ vmStr += "hvm\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "" + (cpus * speed) + "\n";
+ vmStr += "\n";
+ vmStr += "restart\n";
+ vmStr += "destroy\n";
+ vmStr += "destroy\n";
+ vmStr += "\n";
+
+ assertEquals(vmStr, vm.toString());
+ }
+
+ /**
+ This test verifies that the Agent can handle a vmSpec coming
+ from a >4.1 management server.
+
+ Such a vmSpec carries overcommit data, i.e. minSpeed and maxSpeed.
+ */
+ @Test
+ public void testCreateVMFromSpec() {
+ int id = _random.nextInt(65534);
+ String name = "test-instance-1";
+
+ int cpus = _random.nextInt(7) + 1;
+ int minSpeed = 1024;
+ int maxSpeed = 2048;
+ int minRam = 256 * 1024;
+ int maxRam = 512 * 1024;
+
+ String os = "Ubuntu";
+ boolean haEnabled = false;
+ boolean limitCpuUse = false;
+
+ String vncAddr = "1.2.3.4";
+ String vncPassword = "mySuperSecretPassword";
+
+ LibvirtComputingResource lcr = new LibvirtComputingResource();
+ VirtualMachineTO to = new VirtualMachineTO(id, name, VirtualMachine.Type.User, cpus, minSpeed, maxSpeed, minRam, maxRam, BootloaderType.HVM, os, haEnabled, limitCpuUse, vncPassword);
+ to.setVncAddr(vncAddr);
+
+ LibvirtVMDef vm = lcr.createVMFromSpec(to);
+ vm.setHvsType(_hyperVisorType);
+
+ String vmStr = "\n";
+ vmStr += "" + name + "\n";
+ vmStr += "b0f0a72d-7efb-3cad-a8ff-70ebf30b3af9\n";
+ vmStr += "" + os + "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "" + maxRam / 1024 + "\n";
+ vmStr += "" + minRam / 1024 + "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "" + cpus + "\n";
+ vmStr += "\n";
+ vmStr += "hvm\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "\n";
+ vmStr += "" + (cpus * minSpeed) + "\n";
+ vmStr += "\n";
+ vmStr += "restart\n";
+ vmStr += "destroy\n";
+ vmStr += "destroy\n";
+ vmStr += "\n";
+
+ assertEquals(vmStr, vm.toString());
+ }
+}
\ No newline at end of file
diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java
index 00b6111e287..ee5ebe4d671 100755
--- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java
@@ -29,10 +29,13 @@ import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
+import org.apache.cloudstack.api.ApiConstants;
import com.cloud.agent.api.StartupCommand;
import com.cloud.agent.api.StartupRoutingCommand;
import com.cloud.alert.AlertManager;
+import com.cloud.configuration.Config;
+import com.cloud.configuration.dao.ConfigurationDao;
import com.cloud.dc.ClusterDetailsDao;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.DataCenter.NetworkType;
@@ -41,6 +44,7 @@ import com.cloud.dc.dao.ClusterDao;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.exception.DiscoveredWithErrorException;
import com.cloud.exception.DiscoveryException;
+import com.cloud.exception.InvalidParameterValueException;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.Hypervisor;
@@ -49,10 +53,14 @@ import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao;
import com.cloud.hypervisor.vmware.manager.VmwareManager;
import com.cloud.hypervisor.vmware.mo.ClusterMO;
import com.cloud.hypervisor.vmware.mo.HostMO;
+import com.cloud.hypervisor.vmware.mo.VirtualSwitchType;
import com.cloud.hypervisor.vmware.resource.VmwareContextFactory;
import com.cloud.hypervisor.vmware.resource.VmwareResource;
import com.cloud.hypervisor.vmware.util.VmwareContext;
import com.cloud.network.NetworkModel;
+import com.cloud.network.Networks.TrafficType;
+import com.cloud.network.PhysicalNetwork;
+import com.cloud.network.VmwareTrafficLabel;
import com.cloud.network.dao.CiscoNexusVSMDeviceDao;
import com.cloud.resource.Discoverer;
import com.cloud.resource.DiscovererBase;
@@ -60,9 +68,9 @@ import com.cloud.resource.ResourceManager;
import com.cloud.resource.ResourceStateAdapter;
import com.cloud.resource.ServerResource;
import com.cloud.resource.UnableDeleteHostException;
-import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.Storage.TemplateType;
+import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.user.Account;
import com.cloud.utils.UriUtils;
@@ -99,6 +107,9 @@ public class VmwareServerDiscoverer extends DiscovererBase implements
NetworkModel _netmgr;
@Inject
HypervisorCapabilitiesDao _hvCapabilitiesDao;
+ protected Map _urlParams;
+ protected boolean useDVS = false;
+ protected boolean nexusDVS = false;
public VmwareServerDiscoverer() {
s_logger.info("VmwareServerDiscoverer is constructed");
@@ -139,12 +150,84 @@ public class VmwareServerDiscoverer extends DiscovererBase implements
String guestTrafficLabel = null;
Map vsmCredentials = null;
+ VirtualSwitchType defaultVirtualSwitchType = VirtualSwitchType.StandardVirtualSwitch;
+
+ String paramGuestVswitchType = null;
+ String paramGuestVswitchName = null;
+ String paramPublicVswitchType = null;
+ String paramPublicVswitchName = null;
+
+ VmwareTrafficLabel guestTrafficLabelObj = new VmwareTrafficLabel(TrafficType.Guest);
+ VmwareTrafficLabel publicTrafficLabelObj = new VmwareTrafficLabel(TrafficType.Public);
+ Map clusterDetails = _clusterDetailsDao.findDetails(clusterId);
+ _readGlobalConfigParameters();
+
+ // Set default physical network end points for public and guest traffic
+ // Private traffic will be only on standard vSwitch for now. See below TODO.
+ if (useDVS) {
+ // Parse url parameters for type of vswitch and name of vswitch specified at cluster level
+ paramGuestVswitchType = _urlParams.get(ApiConstants.VSWITCH_TYPE_GUEST_TRAFFIC);
+ paramGuestVswitchName = _urlParams.get(ApiConstants.VSWITCH_NAME_GUEST_TRAFFIC);
+ paramPublicVswitchType = _urlParams.get(ApiConstants.VSWITCH_TYPE_PUBLIC_TRAFFIC);
+ paramPublicVswitchName = _urlParams.get(ApiConstants.VSWITCH_NAME_PUBLIC_TRAFFIC);
+ defaultVirtualSwitchType = getDefaultVirtualSwitchType(nexusDVS);
+ }
+ // Get zone wide traffic labels for Guest traffic and Public traffic
+ guestTrafficLabel = _netmgr.getDefaultGuestTrafficLabel(dcId, HypervisorType.VMware);
+ publicTrafficLabel = _netmgr.getDefaultPublicTrafficLabel(dcId, HypervisorType.VMware);
+
+ // Process traffic label information provided at zone level and cluster level
+ guestTrafficLabelObj = getTrafficInfo(TrafficType.Guest, guestTrafficLabel, defaultVirtualSwitchType, paramGuestVswitchType, paramGuestVswitchName, clusterId);
+ publicTrafficLabelObj = getTrafficInfo(TrafficType.Public, publicTrafficLabel, defaultVirtualSwitchType, paramPublicVswitchType, paramPublicVswitchName, clusterId);
+
+ // Zone level vSwitch Type depends on zone level traffic labels
+ //
+ // User can override zone-wide vswitch type (for public and guest traffic) by providing the following optional parameters in addClusterCmd
+ // param "guestvswitchtype" with valid values vmwaredvs, vmwaresvs, nexusdvs
+ // param "publicvswitchtype" with valid values vmwaredvs, vmwaresvs, nexusdvs
+ //
+ // Format of label is <vswitch name>,<vlan id>,<vswitch type>
+ // If a field (<vlan id> or <vswitch type>) is not present leave it empty.
+ // Ex: 1) vswitch0
+ // 2) dvswitch0,200,vmwaredvs
+ // 3) nexusepp0,300,nexusdvs
+ // 4) vswitch1,400,vmwaresvs
+ // 5) vswitch0
+ // default vswitchtype is 'vmwaresvs'.
+ // 'vmwaresvs' is for vmware standard vswitch
+ // 'vmwaredvs' is for vmware distributed virtual switch
+ // 'nexusdvs' is for cisco nexus distributed virtual switch
+
+ // Configuration Check: A physical network cannot be shared by different types of virtual switches.
+ //
+ // Check if different vswitch types are chosen for same physical network
+ // 1. Get physical network for guest traffic - multiple networks
+ // 2. Get physical network for public traffic - single network
+ // See if 2 is in 1
+ // if no - pass
+ // if yes - compare publicTrafficLabelObj.getVirtualSwitchType() == guestTrafficLabelObj.getVirtualSwitchType()
+ // true - pass
+ // false - throw exception - fail cluster add operation
+ List<? extends PhysicalNetwork> pNetworkListGuestTraffic = _netmgr.getPhysicalNtwksSupportingTrafficType(dcId, TrafficType.Guest);
+ List<? extends PhysicalNetwork> pNetworkListPublicTraffic = _netmgr.getPhysicalNtwksSupportingTrafficType(dcId, TrafficType.Public);
+ // Public traffic is on a single physical network, so the first element of the list suffices.
+ PhysicalNetwork pNetworkPublic = pNetworkListPublicTraffic.get(0);
+ if (pNetworkListGuestTraffic.contains(pNetworkPublic)) {
+ if (publicTrafficLabelObj.getVirtualSwitchType() != guestTrafficLabelObj.getVirtualSwitchType()) {
+ String msg = "Both public traffic and guest traffic are over the same physical network " + pNetworkPublic +
+ ", but the virtual switch type chosen for each traffic type is different" +
+ ". A physical network cannot be shared by different types of virtual switches.";
+ s_logger.error(msg);
+ throw new InvalidParameterValueException(msg);
+ }
+ }
+
privateTrafficLabel = _netmgr.getDefaultManagementTrafficLabel(dcId, HypervisorType.VMware);
if (privateTrafficLabel != null) {
s_logger.info("Detected private network label : " + privateTrafficLabel);
}
- if (_vmwareMgr.getNexusVSwitchGlobalParameter()) {
+ if (nexusDVS) {
DataCenterVO zone = _dcDao.findById(dcId);
NetworkType zoneType = zone.getNetworkType();
if (zoneType != NetworkType.Basic) {
@@ -169,7 +252,7 @@ public class VmwareServerDiscoverer extends DiscovererBase implements
context.registerStockObject("privateTrafficLabel",
privateTrafficLabel);
- if (_vmwareMgr.getNexusVSwitchGlobalParameter()) {
+ if (nexusDVS) {
if (vsmCredentials != null) {
s_logger.info("Stocking credentials of Nexus VSM");
context.registerStockObject("vsmcredentials",
@@ -191,8 +274,7 @@ public class VmwareServerDiscoverer extends DiscovererBase implements
}
ManagedObjectReference morCluster = null;
- Map clusterDetails = _clusterDetailsDao
- .findDetails(clusterId);
+ clusterDetails = _clusterDetailsDao.findDetails(clusterId);
if (clusterDetails.get("url") != null) {
URI uriFromCluster = new URI(
UriUtils.encodeURIComponent(clusterDetails.get("url")));
@@ -248,13 +330,8 @@ public class VmwareServerDiscoverer extends DiscovererBase implements
params.put("private.network.vswitch.name",
privateTrafficLabel);
}
- if (publicTrafficLabel != null) {
- params.put("public.network.vswitch.name",
- publicTrafficLabel);
- }
- if (guestTrafficLabel != null) {
- params.put("guest.network.vswitch.name", guestTrafficLabel);
- }
+ params.put("guestTrafficInfo", guestTrafficLabelObj);
+ params.put("publicTrafficInfo", publicTrafficLabelObj);
VmwareResource resource = new VmwareResource();
try {
@@ -418,4 +495,153 @@ public class VmwareServerDiscoverer extends DiscovererBase implements
.getSimpleName());
return super.stop();
}
+
+ private VmwareTrafficLabel getTrafficInfo(TrafficType trafficType, String zoneWideTrafficLabel, VirtualSwitchType defaultVirtualSwitchType, String vSwitchType, String vSwitchName, Long clusterId) {
+ VmwareTrafficLabel trafficLabelObj = null;
+ Map clusterDetails = null;
+ try {
+ trafficLabelObj = new VmwareTrafficLabel(zoneWideTrafficLabel, trafficType, defaultVirtualSwitchType);
+ } catch (InvalidParameterValueException e) {
+ s_logger.error("Failed to recognize virtual switch type specified for " + trafficType +
+ " traffic due to " + e.getMessage());
+ throw e;
+ }
+
+ if (defaultVirtualSwitchType.equals(VirtualSwitchType.StandardVirtualSwitch) || (vSwitchType == null && vSwitchName == null)) {
+ // Case of no cluster level override configuration defined.
+ // Depend only on zone wide traffic label
+ // If global param for dvSwitch is false return default traffic info object with vmware standard vswitch
+ return trafficLabelObj;
+ } else {
+ // Need to persist cluster level override configuration to db
+ clusterDetails = _clusterDetailsDao.findDetails(clusterId);
+ }
+
+ if (vSwitchName != null) {
+ trafficLabelObj.setVirtualSwitchName(vSwitchName);
+ if (trafficType == TrafficType.Guest) {
+ clusterDetails.put(ApiConstants.VSWITCH_NAME_GUEST_TRAFFIC, vSwitchName);
+ } else {
+ clusterDetails.put(ApiConstants.VSWITCH_NAME_PUBLIC_TRAFFIC, vSwitchName);
+ }
+ }
+
+ if (vSwitchType != null) {
+ validateVswitchType(vSwitchType);
+ trafficLabelObj.setVirtualSwitchType(VirtualSwitchType.getType(vSwitchType));
+ if (trafficType == TrafficType.Guest) {
+ clusterDetails.put(ApiConstants.VSWITCH_TYPE_GUEST_TRAFFIC, vSwitchType);
+ } else {
+ clusterDetails.put(ApiConstants.VSWITCH_TYPE_PUBLIC_TRAFFIC, vSwitchType);
+ }
+ }
+
+ // Save cluster level override configuration to cluster details
+ _clusterDetailsDao.persist(clusterId, clusterDetails);
+
+ return trafficLabelObj;
+ }
+
+ private VmwareTrafficLabel getTrafficInfo(TrafficType trafficType, String zoneWideTrafficLabel, Map clusterDetails, VirtualSwitchType defVirtualSwitchType) {
+ VmwareTrafficLabel trafficLabelObj = null;
+ try {
+ trafficLabelObj = new VmwareTrafficLabel(zoneWideTrafficLabel, trafficType, defVirtualSwitchType);
+ } catch (InvalidParameterValueException e) {
+ s_logger.error("Failed to recognize virtual switch type specified for " + trafficType +
+ " traffic due to " + e.getMessage());
+ throw e;
+ }
+
+ if(defVirtualSwitchType.equals(VirtualSwitchType.StandardVirtualSwitch)) {
+ return trafficLabelObj;
+ }
+
+ if (trafficType == TrafficType.Guest) {
+ if(clusterDetails.containsKey(ApiConstants.VSWITCH_NAME_GUEST_TRAFFIC)) {
+ trafficLabelObj.setVirtualSwitchName(clusterDetails.get(ApiConstants.VSWITCH_NAME_GUEST_TRAFFIC));
+ }
+ if(clusterDetails.containsKey(ApiConstants.VSWITCH_TYPE_GUEST_TRAFFIC)) {
+ trafficLabelObj.setVirtualSwitchType(VirtualSwitchType.getType(clusterDetails.get(ApiConstants.VSWITCH_TYPE_GUEST_TRAFFIC)));
+ }
+ } else if (trafficType == TrafficType.Public) {
+ if(clusterDetails.containsKey(ApiConstants.VSWITCH_NAME_PUBLIC_TRAFFIC)) {
+ trafficLabelObj.setVirtualSwitchName(clusterDetails.get(ApiConstants.VSWITCH_NAME_PUBLIC_TRAFFIC));
+ }
+ if(clusterDetails.containsKey(ApiConstants.VSWITCH_TYPE_PUBLIC_TRAFFIC)) {
+ trafficLabelObj.setVirtualSwitchType(VirtualSwitchType.getType(clusterDetails.get(ApiConstants.VSWITCH_TYPE_PUBLIC_TRAFFIC)));
+ }
+ }
+
+ return trafficLabelObj;
+ }
+
+ private void _readGlobalConfigParameters() {
+ String value;
+ if (_configDao != null) {
+ value = _configDao.getValue(Config.VmwareUseDVSwitch.key());
+ useDVS = Boolean.parseBoolean(value);
+ value = _configDao.getValue(Config.VmwareUseNexusVSwitch.key());
+ nexusDVS = Boolean.parseBoolean(value);
+ }
+ }
+
+ @Override
+ protected HashMap buildConfigParams(HostVO host) {
+ HashMap params = super.buildConfigParams(host);
+
+ Map clusterDetails = _clusterDetailsDao.findDetails(host.getClusterId());
+ // Get zone wide traffic labels from guest traffic and public traffic
+ String guestTrafficLabel = _netmgr.getDefaultGuestTrafficLabel(host.getDataCenterId(), HypervisorType.VMware);
+ String publicTrafficLabel = _netmgr.getDefaultPublicTrafficLabel(host.getDataCenterId(), HypervisorType.VMware);
+ _readGlobalConfigParameters();
+ VirtualSwitchType defaultVirtualSwitchType = getDefaultVirtualSwitchType(nexusDVS);
+
+ params.put("guestTrafficInfo", getTrafficInfo(TrafficType.Guest, guestTrafficLabel, clusterDetails, defaultVirtualSwitchType));
+ params.put("publicTrafficInfo", getTrafficInfo(TrafficType.Public, publicTrafficLabel, clusterDetails, defaultVirtualSwitchType));
+
+ return params;
+ }
+
+ private VirtualSwitchType getDefaultVirtualSwitchType(boolean nexusDVS) {
+ return nexusDVS ? VirtualSwitchType.NexusDistributedVirtualSwitch : VirtualSwitchType.VMwareDistributedVirtualSwitch;
+ }
+
+ @Override
+ public ServerResource reloadResource(HostVO host) {
+ String resourceName = host.getResource();
+ ServerResource resource = getResource(resourceName);
+
+ if (resource != null) {
+ _hostDao.loadDetails(host);
+
+ HashMap params = buildConfigParams(host);
+ try {
+ resource.configure(host.getName(), params);
+ } catch (ConfigurationException e) {
+ s_logger.warn("Unable to configure resource due to " + e.getMessage());
+ return null;
+ }
+ if (!resource.start()) {
+ s_logger.warn("Unable to start the resource");
+ return null;
+ }
+ }
+ return resource;
+ }
+
+ private void validateVswitchType(String inputVswitchType) {
+ VirtualSwitchType vSwitchType = VirtualSwitchType.getType(inputVswitchType);
+ if (vSwitchType == VirtualSwitchType.None) {
+ s_logger.error("Unable to resolve " + inputVswitchType + " to a valid virtual switch type in VMware environment.");
+ throw new InvalidParameterValueException("Invalid virtual switch type : " + inputVswitchType);
+ }
+ }
+
+ @Override
+ public void putParam(Map params) {
+ if (_urlParams == null) {
+ _urlParams = new HashMap();
+ }
+ _urlParams.putAll(params);
+ }
}
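The discoverer now resolves the virtual switch type per traffic type from three sources: the optional cluster-level override passed to addCluster (e.g. guestvswitchtype/publicvswitchtype), the third field of the zone-wide traffic label, and otherwise a default derived from the Config.VmwareUseDVSwitch / Config.VmwareUseNexusVSwitch global settings. The sketch below is a condensed, illustrative view of that precedence; it is not a method in the patch.

    import com.cloud.hypervisor.vmware.mo.VirtualSwitchType;

    public class VswitchTypeResolutionSketch {
        static VirtualSwitchType resolve(String clusterOverride, VirtualSwitchType fromLabel,
                                         boolean useDVS, boolean nexusDVS) {
            if (clusterOverride != null) {
                // cluster-level override wins (validated like validateVswitchType above)
                return VirtualSwitchType.getType(clusterOverride);
            }
            if (fromLabel != null) {
                // type embedded in the zone-wide traffic label
                return fromLabel;
            }
            if (!useDVS) {
                // distributed vSwitch support disabled globally
                return VirtualSwitchType.StandardVirtualSwitch;
            }
            return nexusDVS ? VirtualSwitchType.NexusDistributedVirtualSwitch
                            : VirtualSwitchType.VMwareDistributedVirtualSwitch;
        }
    }
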
diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManager.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManager.java
index 36fa0f338b1..fb6d3d6667f 100755
--- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManager.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManager.java
@@ -59,17 +59,11 @@ public interface VmwareManager {
boolean beginExclusiveOperation(int timeOutSeconds);
void endExclusiveOperation();
- boolean getNexusVSwitchGlobalParameter();
-
boolean getFullCloneFlag();
Map getNexusVSMCredentialsByClusterId(Long clusterId);
String getPrivateVSwitchName(long dcId, HypervisorType hypervisorType);
-
- String getPublicVSwitchName(long dcId, HypervisorType hypervisorType);
-
- String getGuestVSwitchName(long dcId, HypervisorType hypervisorType);
public String getRootDiskController();
}
diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java
index 9e71f2c49ad..e44054ee1d5 100755
--- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java
@@ -127,10 +127,9 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
String _mountParent;
StorageLayer _storage;
+ String _privateNetworkVSwitchName = "vSwitch0";
- String _privateNetworkVSwitchName;
- String _publicNetworkVSwitchName;
- String _guestNetworkVSwitchName;
+ int _portsPerDvPortGroup = 256;
boolean _nexusVSwitchActive;
boolean _fullCloneFlag;
String _serviceConsoleName;
@@ -197,14 +196,6 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
_storage.configure("StorageLayer", params);
}
- value = _configDao.getValue(Config.VmwareUseNexusVSwitch.key());
- if(value == null) {
- _nexusVSwitchActive = false;
- }
- else {
- _nexusVSwitchActive = Boolean.parseBoolean(value);
- }
-
value = _configDao.getValue(Config.VmwareCreateFullClone.key());
if (value == null) {
_fullCloneFlag = false;
@@ -212,36 +203,6 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
_fullCloneFlag = Boolean.parseBoolean(value);
}
- _privateNetworkVSwitchName = _configDao.getValue(Config.VmwarePrivateNetworkVSwitch.key());
-
- if (_privateNetworkVSwitchName == null) {
- if (_nexusVSwitchActive) {
- _privateNetworkVSwitchName = "privateEthernetPortProfile";
- } else {
- _privateNetworkVSwitchName = "vSwitch0";
- }
- }
-
- _publicNetworkVSwitchName = _configDao.getValue(Config.VmwarePublicNetworkVSwitch.key());
-
- if (_publicNetworkVSwitchName == null) {
- if (_nexusVSwitchActive) {
- _publicNetworkVSwitchName = "publicEthernetPortProfile";
- } else {
- _publicNetworkVSwitchName = "vSwitch0";
- }
- }
-
- _guestNetworkVSwitchName = _configDao.getValue(Config.VmwareGuestNetworkVSwitch.key());
-
- if (_guestNetworkVSwitchName == null) {
- if (_nexusVSwitchActive) {
- _guestNetworkVSwitchName = "guestEthernetPortProfile";
- } else {
- _guestNetworkVSwitchName = "vSwitch0";
- }
- }
-
_serviceConsoleName = _configDao.getValue(Config.VmwareServiceConsole.key());
if(_serviceConsoleName == null) {
_serviceConsoleName = "Service Console";
@@ -320,11 +281,6 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
return true;
}
- @Override
- public boolean getNexusVSwitchGlobalParameter() {
- return _nexusVSwitchActive;
- }
-
@Override
public boolean getFullCloneFlag() {
return _fullCloneFlag;
@@ -340,15 +296,6 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
return _netMgr.getDefaultManagementTrafficLabel(dcId, hypervisorType);
}
- @Override
- public String getPublicVSwitchName(long dcId, HypervisorType hypervisorType) {
- return _netMgr.getDefaultPublicTrafficLabel(dcId, hypervisorType);
- }
-
- @Override
- public String getGuestVSwitchName(long dcId, HypervisorType hypervisorType) {
- return _netMgr.getDefaultGuestTrafficLabel(dcId, hypervisorType);
- }
private void prepareHost(HostMO hostMo, String privateTrafficLabel) throws Exception {
// For ESX host, we need to enable host firewall to allow VNC access
@@ -370,12 +317,8 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
}
s_logger.info("Preparing network on host " + hostMo.getContext().toString() + " for " + privateTrafficLabel);
- if(!_nexusVSwitchActive) {
HypervisorHostHelper.prepareNetwork(vSwitchName, "cloud.private", hostMo, vlanId, null, null, 180000, false);
- }
- else {
- HypervisorHostHelper.prepareNetwork(vSwitchName, "cloud.private", hostMo, vlanId, null, null, 180000);
- }
+
}
@Override
@@ -508,10 +451,6 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
@Override
public void setupResourceStartupParams(Map params) {
- params.put("private.network.vswitch.name", _privateNetworkVSwitchName);
- params.put("public.network.vswitch.name", _publicNetworkVSwitchName);
- params.put("guest.network.vswitch.name", _guestNetworkVSwitchName);
- params.put("vmware.use.nexus.vswitch", _nexusVSwitchActive);
params.put("vmware.create.full.clone", _fullCloneFlag);
params.put("service.console.name", _serviceConsoleName);
params.put("management.portgroup.name", _managemetPortGroupName);
@@ -519,6 +458,7 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
params.put("vmware.reserve.mem", _reserveMem);
params.put("vmware.root.disk.controller", _rootDiskController);
params.put("vmware.recycle.hung.wokervm", _recycleHungWorker);
+ params.put("ports.per.dvportgroup", _portsPerDvPortGroup);
}
@Override
diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
index 1bc233cc70c..83226a41db4 100755
--- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
@@ -201,6 +201,8 @@ import com.cloud.network.HAProxyConfigurator;
import com.cloud.network.LoadBalancerConfigurator;
import com.cloud.network.Networks;
import com.cloud.network.Networks.BroadcastDomainType;
+import com.cloud.network.Networks.TrafficType;
+import com.cloud.network.VmwareTrafficLabel;
import com.cloud.network.rules.FirewallRule;
import com.cloud.resource.ServerResource;
import com.cloud.serializer.GsonHelper;
@@ -290,10 +292,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
protected String _vCenterAddress;
protected String _privateNetworkVSwitchName;
- protected String _publicNetworkVSwitchName;
- protected String _guestNetworkVSwitchName;
- protected VirtualSwitchType _vSwitchType = VirtualSwitchType.StandardVirtualSwitch;
- protected boolean _nexusVSwitch = false;
+ protected VmwareTrafficLabel _guestTrafficInfo = new VmwareTrafficLabel(TrafficType.Guest);
+ protected VmwareTrafficLabel _publicTrafficInfo = new VmwareTrafficLabel(TrafficType.Public);
+ protected int _portsPerDvPortGroup;
protected boolean _fullCloneFlag = false;
protected boolean _reserveCpu = false;
@@ -1328,7 +1329,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
NicTO nicTo = cmd.getNic();
VirtualDevice nic;
Pair networkInfo = prepareNetworkFromNicInfo(vmMo.getRunningHost(), nicTo);
- if (mgr.getNexusVSwitchGlobalParameter()) {
+ if (VmwareHelper.isDvPortGroup(networkInfo.first())) {
String dvSwitchUuid;
ManagedObjectReference dcMor = hyperHost.getHyperHostDatacenter();
DatacenterMO dataCenterMo = new DatacenterMO(context, dcMor);
@@ -1560,13 +1561,16 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
private void plugPublicNic(VirtualMachineMO vmMo, final String vlanId, final String vifMacAddress) throws Exception {
// TODO : probably need to set traffic shaping
Pair networkInfo = null;
-
- if (!_nexusVSwitch) {
- networkInfo = HypervisorHostHelper.prepareNetwork(this._publicNetworkVSwitchName, "cloud.public",
+ VirtualSwitchType vSwitchType = VirtualSwitchType.StandardVirtualSwitch;
+ if (_publicTrafficInfo != null) {
+ vSwitchType = _publicTrafficInfo.getVirtualSwitchType();
+ }
+ if (VirtualSwitchType.StandardVirtualSwitch == vSwitchType) {
+ networkInfo = HypervisorHostHelper.prepareNetwork(this._publicTrafficInfo.getVirtualSwitchName(), "cloud.public",
vmMo.getRunningHost(), vlanId, null, null, this._ops_timeout, true);
} else {
- networkInfo = HypervisorHostHelper.prepareNetwork(this._publicNetworkVSwitchName, "cloud.public",
- vmMo.getRunningHost(), vlanId, null, null, this._ops_timeout);
+ networkInfo = HypervisorHostHelper.prepareNetwork(this._publicTrafficInfo.getVirtualSwitchName(), "cloud.public",
+ vmMo.getRunningHost(), vlanId, null, null, this._ops_timeout, vSwitchType, _portsPerDvPortGroup);
}
int nicIndex = allocPublicNicIndex(vmMo);
@@ -1576,7 +1580,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
VirtualEthernetCard device = (VirtualEthernetCard) nicDevices[nicIndex];
- if (!_nexusVSwitch) {
+ if (VirtualSwitchType.StandardVirtualSwitch == vSwitchType) {
VirtualEthernetCardNetworkBackingInfo nicBacking = new VirtualEthernetCardNetworkBackingInfo();
nicBacking.setDeviceName(networkInfo.second());
nicBacking.setNetwork(networkInfo.first());
@@ -2279,7 +2283,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
s_logger.info("Prepare NIC device based on NicTO: " + _gson.toJson(nicTo));
Pair networkInfo = prepareNetworkFromNicInfo(vmMo.getRunningHost(), nicTo);
- if (mgr.getNexusVSwitchGlobalParameter()) {
+ if (VmwareHelper.isDvPortGroup(networkInfo.first())) {
String dvSwitchUuid;
ManagedObjectReference dcMor = hyperHost.getHyperHostDatacenter();
DatacenterMO dataCenterMo = new DatacenterMO(context, dcMor);
@@ -2479,21 +2483,36 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
}
private Pair prepareNetworkFromNicInfo(HostMO hostMo, NicTO nicTo) throws Exception {
+ Pair switchName;
+ TrafficType trafficType;
+ VirtualSwitchType switchType;
+
+ switchName = getTargetSwitch(nicTo);
+ trafficType = nicTo.getType();
+ // Get switch type from resource property which is dictated by cluster property
+ // If a virtual switch type is specified while adding cluster that will be used.
+ // Else If virtual switch type is specified in physical traffic label that will be used
+ // Else use standard vSwitch
+ switchType = VirtualSwitchType.StandardVirtualSwitch;
+ if (trafficType == TrafficType.Guest && _guestTrafficInfo != null) {
+ switchType = _guestTrafficInfo.getVirtualSwitchType();
+ } else if (trafficType == TrafficType.Public && _publicTrafficInfo != null) {
+ switchType = _publicTrafficInfo.getVirtualSwitchType();
+ }
- Pair switchName = getTargetSwitch(nicTo);
String namePrefix = getNetworkNamePrefix(nicTo);
Pair networkInfo = null;
- s_logger.info("Prepare network on vSwitch: " + switchName + " with name prefix: " + namePrefix);
+ s_logger.info("Prepare network on " + switchType + " " + switchName + " with name prefix: " + namePrefix);
- if(!_nexusVSwitch) {
+ if (VirtualSwitchType.StandardVirtualSwitch == switchType) {
networkInfo = HypervisorHostHelper.prepareNetwork(switchName.first(), namePrefix, hostMo, getVlanInfo(nicTo, switchName.second()),
nicTo.getNetworkRateMbps(), nicTo.getNetworkRateMulticastMbps(), _ops_timeout,
!namePrefix.startsWith("cloud.private"));
}
else {
networkInfo = HypervisorHostHelper.prepareNetwork(switchName.first(), namePrefix, hostMo, getVlanInfo(nicTo, switchName.second()),
- nicTo.getNetworkRateMbps(), nicTo.getNetworkRateMulticastMbps(), _ops_timeout);
+ nicTo.getNetworkRateMbps(), nicTo.getNetworkRateMulticastMbps(), _ops_timeout, switchType, _portsPerDvPortGroup);
}
return networkInfo;
@@ -2503,8 +2522,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
private Pair getTargetSwitch(NicTO nicTo) throws Exception {
if(nicTo.getName() != null && !nicTo.getName().isEmpty()) {
String[] tokens = nicTo.getName().split(",");
-
- if(tokens.length == 2) {
+ // Format of network traffic label is <vswitch name>,<vlan id>,<vswitch type>
+ // If all 3 fields are mentioned then the number of tokens would be 3.
+ // If only <vswitch name>,<vlan id> are mentioned then the number of tokens would be 2.
+ if(tokens.length == 2 || tokens.length == 3) {
return new Pair(tokens[0], tokens[1]);
} else {
return new Pair(nicTo.getName(), Vlan.UNTAGGED);
@@ -2512,11 +2533,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
}
if (nicTo.getType() == Networks.TrafficType.Guest) {
- return new Pair(this._guestNetworkVSwitchName, Vlan.UNTAGGED);
+ return new Pair(this._guestTrafficInfo.getVirtualSwitchName(), Vlan.UNTAGGED);
} else if (nicTo.getType() == Networks.TrafficType.Control || nicTo.getType() == Networks.TrafficType.Management) {
return new Pair(this._privateNetworkVSwitchName, Vlan.UNTAGGED);
} else if (nicTo.getType() == Networks.TrafficType.Public) {
- return new Pair(this._publicNetworkVSwitchName, Vlan.UNTAGGED);
+ return new Pair(this._publicTrafficInfo.getVirtualSwitchName(), Vlan.UNTAGGED);
} else if (nicTo.getType() == Networks.TrafficType.Storage) {
return new Pair(this._privateNetworkVSwitchName, Vlan.UNTAGGED);
} else if (nicTo.getType() == Networks.TrafficType.Vpn) {
@@ -4557,7 +4578,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
if(!"untagged".equalsIgnoreCase(tokens[2]))
vlanId = tokens[2];
- HypervisorHostHelper.prepareNetwork(this._publicNetworkVSwitchName, "cloud.public",
+ HypervisorHostHelper.prepareNetwork(_publicTrafficInfo.getVirtualSwitchName(), "cloud.public",
hostMo, vlanId, networkRateMbps, null, this._ops_timeout, false);
} else {
s_logger.info("Skip suspecious cloud network " + networkName);
@@ -4574,7 +4595,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
if(!"untagged".equalsIgnoreCase(tokens[2]))
vlanId = tokens[2];
- HypervisorHostHelper.prepareNetwork(this._guestNetworkVSwitchName, "cloud.guest",
+ HypervisorHostHelper.prepareNetwork(_guestTrafficInfo.getVirtualSwitchName(), "cloud.guest",
hostMo, vlanId, networkRateMbps, null, this._ops_timeout, false);
} else {
s_logger.info("Skip suspecious cloud network " + networkName);
@@ -4893,6 +4914,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
_morHyperHost.setType(hostTokens[0]);
_morHyperHost.setValue(hostTokens[1]);
+ _guestTrafficInfo = (VmwareTrafficLabel) params.get("guestTrafficInfo");
+ _publicTrafficInfo = (VmwareTrafficLabel) params.get("publicTrafficInfo");
VmwareContext context = getServiceContext();
try {
VmwareManager mgr = context.getStockObject(VmwareManager.CONTEXT_STOCK_NAME);
@@ -4900,12 +4923,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
CustomFieldsManagerMO cfmMo = new CustomFieldsManagerMO(context, context.getServiceContent().getCustomFieldsManager());
cfmMo.ensureCustomFieldDef("Datastore", CustomFieldConstants.CLOUD_UUID);
- if (mgr.getNexusVSwitchGlobalParameter()) {
+ if (_publicTrafficInfo != null && _publicTrafficInfo.getVirtualSwitchType() != VirtualSwitchType.StandardVirtualSwitch ||
+ _guestTrafficInfo != null && _guestTrafficInfo.getVirtualSwitchType() != VirtualSwitchType.StandardVirtualSwitch) {
cfmMo.ensureCustomFieldDef("DistributedVirtualPortgroup", CustomFieldConstants.CLOUD_GC_DVP);
- } else {
- cfmMo.ensureCustomFieldDef("Network", CustomFieldConstants.CLOUD_GC);
}
-
+ cfmMo.ensureCustomFieldDef("Network", CustomFieldConstants.CLOUD_GC);
cfmMo.ensureCustomFieldDef("VirtualMachine", CustomFieldConstants.CLOUD_UUID);
cfmMo.ensureCustomFieldDef("VirtualMachine", CustomFieldConstants.CLOUD_NIC_MASK);
@@ -4913,15 +4935,14 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
_hostName = hostMo.getHyperHostName();
Map vsmCredentials;
- if (mgr.getNexusVSwitchGlobalParameter()) {
+ if (_guestTrafficInfo.getVirtualSwitchType() == VirtualSwitchType.NexusDistributedVirtualSwitch ||
+ _publicTrafficInfo.getVirtualSwitchType() == VirtualSwitchType.NexusDistributedVirtualSwitch) {
vsmCredentials = mgr.getNexusVSMCredentialsByClusterId(Long.parseLong(_cluster));
if (vsmCredentials != null) {
s_logger.info("Stocking credentials while configuring resource.");
context.registerStockObject("vsmcredentials", vsmCredentials);
}
_privateNetworkVSwitchName = mgr.getPrivateVSwitchName(Long.parseLong(_dcId), HypervisorType.VMware);
- _publicNetworkVSwitchName = mgr.getPublicVSwitchName(Long.parseLong(_dcId), HypervisorType.VMware);
- _guestNetworkVSwitchName = mgr.getGuestVSwitchName(Long.parseLong(_dcId), HypervisorType.VMware);
}
} catch (Exception e) {
@@ -4931,12 +4952,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
if(_privateNetworkVSwitchName == null) {
_privateNetworkVSwitchName = (String) params.get("private.network.vswitch.name");
}
- if(_publicNetworkVSwitchName == null) {
- _publicNetworkVSwitchName = (String) params.get("public.network.vswitch.name");
- }
- if(_guestNetworkVSwitchName == null) {
- _guestNetworkVSwitchName = (String) params.get("guest.network.vswitch.name");
- }
String value = (String) params.get("vmware.reserve.cpu");
if(value != null && value.equalsIgnoreCase("true"))
@@ -4956,9 +4971,15 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
else
_rootDiskController = DiskControllerType.ide;
- value = params.get("vmware.use.nexus.vswitch").toString();
- if(value != null && value.equalsIgnoreCase("true"))
- _nexusVSwitch = true;
+ Integer intObj = (Integer) params.get("ports.per.dvportgroup");
+ if (intObj != null)
+ _portsPerDvPortGroup = intObj.intValue();
+
+ s_logger.info("VmwareResource network configuration info." +
+ " private traffic over vSwitch: " + _privateNetworkVSwitchName + ", public traffic over " +
+ this._publicTrafficInfo.getVirtualSwitchType() + " : " + this._publicTrafficInfo.getVirtualSwitchName() +
+ ", guest traffic over " + this._guestTrafficInfo.getVirtualSwitchType() + " : " +
+ this._guestTrafficInfo.getVirtualSwitchName());
value = params.get("vmware.create.full.clone").toString();
if (value != null && value.equalsIgnoreCase("true")) {
@@ -4967,9 +4988,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
_fullCloneFlag = false;
}
- s_logger.info("VmwareResource network configuration info. private vSwitch: " + _privateNetworkVSwitchName + ", public vSwitch: " + _publicNetworkVSwitchName + ", guest network: "
- + _guestNetworkVSwitchName);
-
return true;
}
diff --git a/plugins/hypervisors/vmware/src/com/cloud/network/VmwareTrafficLabel.java b/plugins/hypervisors/vmware/src/com/cloud/network/VmwareTrafficLabel.java
new file mode 100644
index 00000000000..90a42781e29
--- /dev/null
+++ b/plugins/hypervisors/vmware/src/com/cloud/network/VmwareTrafficLabel.java
@@ -0,0 +1,118 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.network;
+
+import com.cloud.exception.InvalidParameterValueException;
+import com.cloud.hypervisor.vmware.mo.VirtualSwitchType;
+import com.cloud.network.Networks.TrafficType;
+
+public class VmwareTrafficLabel implements TrafficLabel {
+ public static final String DEFAULT_VSWITCH_NAME = "vSwitch0";
+ public static final String DEFAULT_DVSWITCH_NAME = "dvSwitch0";
+ public static final String DEFAULT_NDVSWITCH_NAME = "epp0";
+ public static final int MAX_FIELDS_VMWARE_LABEL = 3;
+ public static final int VMWARE_LABEL_FIELD_INDEX_NAME = 0;
+ public static final int VMWARE_LABEL_FIELD_INDEX_VLANID = 1;
+ public static final int VMWARE_LABEL_FIELD_INDEX_VSWITCH_TYPE = 2;
+
+ TrafficType _trafficType = TrafficType.None;
+ VirtualSwitchType _vSwitchType = VirtualSwitchType.StandardVirtualSwitch;
+ String _vSwitchName = DEFAULT_VSWITCH_NAME;
+ String _vlanId = null;
+
+ public VmwareTrafficLabel(String networkLabel, TrafficType trafficType, VirtualSwitchType defVswitchType) {
+ _trafficType = trafficType;
+ _parseLabel(networkLabel, defVswitchType);
+ }
+
+ public VmwareTrafficLabel(String networkLabel, TrafficType trafficType) {
+ _trafficType = trafficType;
+ _parseLabel(networkLabel, VirtualSwitchType.StandardVirtualSwitch);
+ }
+
+ public VmwareTrafficLabel(TrafficType trafficType, VirtualSwitchType defVswitchType) {
+ _trafficType = trafficType; // Define traffic label with specific traffic type
+ _parseLabel(null, defVswitchType);
+ }
+
+ public VmwareTrafficLabel(TrafficType trafficType) {
+ _trafficType = trafficType; // Define traffic label with specific traffic type
+ _parseLabel(null, VirtualSwitchType.StandardVirtualSwitch);
+ }
+
+ public VmwareTrafficLabel() {
+ }
+
+ private void _parseLabel(String networkLabel, VirtualSwitchType defVswitchType) {
+ if (networkLabel == null || networkLabel.isEmpty()) {
+ // Set defaults for label in case of distributed vSwitch
+ if (defVswitchType.equals(VirtualSwitchType.VMwareDistributedVirtualSwitch)) {
+ _vSwitchName = DEFAULT_DVSWITCH_NAME;
+ _vSwitchType = VirtualSwitchType.VMwareDistributedVirtualSwitch;
+ } else if (defVswitchType.equals(VirtualSwitchType.NexusDistributedVirtualSwitch)) {
+ _vSwitchName = DEFAULT_NDVSWITCH_NAME;
+ _vSwitchType = VirtualSwitchType.NexusDistributedVirtualSwitch;
+ }
+ return;
+ }
+ String[] tokens = networkLabel.split(",");
+ if (tokens.length > VMWARE_LABEL_FIELD_INDEX_NAME) {
+ _vSwitchName = tokens[VMWARE_LABEL_FIELD_INDEX_NAME].trim();
+ }
+ if (tokens.length > VMWARE_LABEL_FIELD_INDEX_VLANID) {
+ _vlanId = tokens[VMWARE_LABEL_FIELD_INDEX_VLANID].trim();
+ }
+ if (tokens.length > VMWARE_LABEL_FIELD_INDEX_VSWITCH_TYPE) {
+ _vSwitchType = VirtualSwitchType.getType(tokens[VMWARE_LABEL_FIELD_INDEX_VSWITCH_TYPE].trim());
+ if(VirtualSwitchType.None == _vSwitchType) {
+ throw new InvalidParameterValueException("Invalid virtual switch type : " + tokens[VMWARE_LABEL_FIELD_INDEX_VSWITCH_TYPE].trim());
+ }
+ }
+ if (tokens.length > MAX_FIELDS_VMWARE_LABEL ) {
+ throw new InvalidParameterValueException("Found extraneous fields in vmware traffic label : " + networkLabel);
+ }
+ }
+
+ @Override
+ public TrafficType getTrafficType() {
+ return _trafficType;
+ }
+
+ @Override
+ public String getNetworkLabel() {
+ return null;
+ }
+
+ public VirtualSwitchType getVirtualSwitchType() {
+ return _vSwitchType;
+ }
+
+ public String getVirtualSwitchName() {
+ return _vSwitchName;
+ }
+
+ public String getVlanId() {
+ return _vlanId;
+ }
+ public void setVirtualSwitchName(String vSwitchName) {
+ _vSwitchName = vSwitchName;
+ }
+
+ public void setVirtualSwitchType(VirtualSwitchType vSwitchType) {
+ _vSwitchType = vSwitchType;
+ }
+}
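VmwareTrafficLabel parses a zone-wide physical network label of the form <vswitch name>,<vlan id>,<vswitch type> and supplies defaults when the label is empty. A short usage sketch; the label values here are made up for illustration.

    import com.cloud.hypervisor.vmware.mo.VirtualSwitchType;
    import com.cloud.network.Networks.TrafficType;
    import com.cloud.network.VmwareTrafficLabel;

    public class TrafficLabelParsingSketch {
        public static void main(String[] args) {
            // full label: name, vlan id and vswitch type are all given
            VmwareTrafficLabel guest = new VmwareTrafficLabel(
                    "dvSwitch0,200,vmwaredvs", TrafficType.Guest,
                    VirtualSwitchType.VMwareDistributedVirtualSwitch);
            String name = guest.getVirtualSwitchName();            // "dvSwitch0"
            String vlan = guest.getVlanId();                       // "200"
            VirtualSwitchType type = guest.getVirtualSwitchType(); // parsed from "vmwaredvs"

            // empty label: defaults are derived from the default vswitch type
            VmwareTrafficLabel pub = new VmwareTrafficLabel(
                    null, TrafficType.Public,
                    VirtualSwitchType.NexusDistributedVirtualSwitch);
            String defName = pub.getVirtualSwitchName();           // "epp0" (DEFAULT_NDVSWITCH_NAME)
            System.out.println(name + " " + vlan + " " + type + " " + defName);
        }
    }
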
diff --git a/plugins/storage-allocators/random/pom.xml b/plugins/storage-allocators/random/pom.xml
index 06754ffc133..6b91908271a 100644
--- a/plugins/storage-allocators/random/pom.xml
+++ b/plugins/storage-allocators/random/pom.xml
@@ -16,7 +16,8 @@
specific language governing permissions and limitations
under the License.
-->
-
+
<modelVersion>4.0.0</modelVersion>
<artifactId>cloud-plugin-storage-allocator-random</artifactId>
<name>Apache CloudStack Plugin - Storage Allocator Random</name>
@@ -26,4 +27,11 @@
<version>4.2.0-SNAPSHOT</version>
<relativePath>../../pom.xml</relativePath>
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.cloudstack</groupId>
+ <artifactId>cloud-engine-storage</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ </dependencies>
diff --git a/plugins/storage-allocators/random/src/com/cloud/storage/allocator/RandomStoragePoolAllocator.java b/plugins/storage-allocators/random/src/org/apache/cloudstack/storage/allocator/RandomStoragePoolAllocator.java
similarity index 74%
rename from plugins/storage-allocators/random/src/com/cloud/storage/allocator/RandomStoragePoolAllocator.java
rename to plugins/storage-allocators/random/src/org/apache/cloudstack/storage/allocator/RandomStoragePoolAllocator.java
index af21f50cc6f..cbe6647ded8 100644
--- a/plugins/storage-allocators/random/src/com/cloud/storage/allocator/RandomStoragePoolAllocator.java
+++ b/plugins/storage-allocators/random/src/org/apache/cloudstack/storage/allocator/RandomStoragePoolAllocator.java
@@ -13,7 +13,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package com.cloud.storage.allocator;
+package org.apache.cloudstack.storage.allocator;
import java.util.ArrayList;
import java.util.Collections;
@@ -21,44 +21,32 @@ import java.util.List;
import javax.ejb.Local;
+import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
+import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.log4j.Logger;
-import org.springframework.stereotype.Component;
import com.cloud.deploy.DeploymentPlan;
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
-import com.cloud.server.StatsCollector;
import com.cloud.storage.StoragePool;
-import com.cloud.storage.VMTemplateVO;
import com.cloud.vm.DiskProfile;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineProfile;
-@Component
@Local(value=StoragePoolAllocator.class)
public class RandomStoragePoolAllocator extends AbstractStoragePoolAllocator {
private static final Logger s_logger = Logger.getLogger(RandomStoragePoolAllocator.class);
@Override
- public boolean allocatorIsCorrectType(DiskProfile dskCh) {
- return true;
- }
-
- @Override
- public List<StoragePool> allocateToPool(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
+ public List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
List<StoragePool> suitablePools = new ArrayList<StoragePool>();
- VMTemplateVO template = (VMTemplateVO)vmProfile.getTemplate();
- // Check that the allocator type is correct
- if (!allocatorIsCorrectType(dskCh)) {
- return suitablePools;
- }
long dcId = plan.getDataCenterId();
Long podId = plan.getPodId();
Long clusterId = plan.getClusterId();
s_logger.debug("Looking for pools in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId);
- List<StoragePoolVO> pools = _storagePoolDao.listBy(dcId, podId, clusterId);
+ List<StoragePoolVO> pools = _storagePoolDao.listBy(dcId, podId, clusterId, ScopeType.CLUSTER);
if (pools.size() == 0) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("No storage pools available for allocation, returning");
@@ -66,8 +54,6 @@ public class RandomStoragePoolAllocator extends AbstractStoragePoolAllocator {
return suitablePools;
}
- StatsCollector sc = StatsCollector.getInstance();
-
Collections.shuffle(pools);
if (s_logger.isDebugEnabled()) {
s_logger.debug("RandomStoragePoolAllocator has " + pools.size() + " pools to check for allocation");
@@ -76,8 +62,9 @@ public class RandomStoragePoolAllocator extends AbstractStoragePoolAllocator {
if(suitablePools.size() == returnUpTo){
break;
}
- if (checkPool(avoid, pool, dskCh, template, null, sc, plan)) {
- StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId());
+ StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId());
+
+ if (filter(avoid, pol, dskCh, plan)) {
suitablePools.add(pol);
}
}
diff --git a/pom.xml b/pom.xml
index 0cb711e50c6..540c057c2ea 100644
--- a/pom.xml
+++ b/pom.xml
@@ -90,6 +90,7 @@
1.4
0.9.8
0.10
+ <cs.replace.properties>build/replace.properties</cs.replace.properties>
@@ -161,9 +162,9 @@
<module>usage</module>
<module>utils</module>
<module>deps/XenServerJava</module>
+ <module>engine</module>
<module>plugins</module>
<module>patches</module>
- <module>engine</module>
<module>framework</module>
<module>services</module>
<module>test</module>
diff --git a/server/pom.xml b/server/pom.xml
index 59d1b15b911..0d7dada7bdc 100644
--- a/server/pom.xml
+++ b/server/pom.xml
@@ -130,6 +130,43 @@
+
+ maven-antrun-plugin
+ 1.7
+
+
+ generate-resource
+ generate-resources
+
+ run
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/server/src/com/cloud/api/query/QueryManagerImpl.java b/server/src/com/cloud/api/query/QueryManagerImpl.java
index 51312a60eb1..746e8cb31ff 100644
--- a/server/src/com/cloud/api/query/QueryManagerImpl.java
+++ b/server/src/com/cloud/api/query/QueryManagerImpl.java
@@ -2190,12 +2190,15 @@ public class QueryManagerImpl extends ManagerBase implements QueryService {
Long domainId = cmd.getDomainId();
Long id = cmd.getId();
String keyword = cmd.getKeyword();
+ String name = cmd.getName();
Filter searchFilter = new Filter(DataCenterJoinVO.class, null, false, cmd.getStartIndex(), cmd.getPageSizeVal());
SearchCriteria<DataCenterJoinVO> sc = _dcJoinDao.createSearchCriteria();
if (id != null) {
sc.addAnd("id", SearchCriteria.Op.EQ, id);
+ } else if (name != null) {
+ sc.addAnd("name", SearchCriteria.Op.EQ, name);
} else {
if (keyword != null) {
SearchCriteria<DataCenterJoinVO> ssc = _dcJoinDao.createSearchCriteria();
diff --git a/server/src/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java b/server/src/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java
index bf629896907..cff4cfc1b95 100644
--- a/server/src/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java
+++ b/server/src/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java
@@ -19,6 +19,7 @@ package com.cloud.cluster.agentlb.dao;
import java.util.Date;
import java.util.List;
+import javax.annotation.PostConstruct;
import javax.ejb.Local;
import org.apache.log4j.Logger;
@@ -37,30 +38,35 @@ import com.cloud.utils.db.SearchCriteria;
public class HostTransferMapDaoImpl extends GenericDaoBase<HostTransferMapVO, Long> implements HostTransferMapDao {
private static final Logger s_logger = Logger.getLogger(HostTransferMapDaoImpl.class);
- protected final SearchBuilder<HostTransferMapVO> AllFieldsSearch;
- protected final SearchBuilder<HostTransferMapVO> IntermediateStateSearch;
- protected final SearchBuilder<HostTransferMapVO> ActiveSearch;
+ protected SearchBuilder<HostTransferMapVO> AllFieldsSearch;
+ protected SearchBuilder<HostTransferMapVO> IntermediateStateSearch;
+ protected SearchBuilder<HostTransferMapVO> ActiveSearch;
public HostTransferMapDaoImpl() {
- AllFieldsSearch = createSearchBuilder();
- AllFieldsSearch.and("id", AllFieldsSearch.entity().getId(), SearchCriteria.Op.EQ);
- AllFieldsSearch.and("initialOwner", AllFieldsSearch.entity().getInitialOwner(), SearchCriteria.Op.EQ);
- AllFieldsSearch.and("futureOwner", AllFieldsSearch.entity().getFutureOwner(), SearchCriteria.Op.EQ);
- AllFieldsSearch.and("state", AllFieldsSearch.entity().getState(), SearchCriteria.Op.EQ);
- AllFieldsSearch.done();
-
- IntermediateStateSearch = createSearchBuilder();
- IntermediateStateSearch.and("futureOwner", IntermediateStateSearch.entity().getFutureOwner(), SearchCriteria.Op.EQ);
- IntermediateStateSearch.and("initialOwner", IntermediateStateSearch.entity().getInitialOwner(), SearchCriteria.Op.EQ);
- IntermediateStateSearch.and("state", IntermediateStateSearch.entity().getState(), SearchCriteria.Op.IN);
- IntermediateStateSearch.done();
-
- ActiveSearch = createSearchBuilder();
- ActiveSearch.and("created", ActiveSearch.entity().getCreated(), SearchCriteria.Op.GT);
- ActiveSearch.and("id", ActiveSearch.entity().getId(), SearchCriteria.Op.EQ);
- ActiveSearch.and("state", ActiveSearch.entity().getState(), SearchCriteria.Op.EQ);
- ActiveSearch.done();
-
+ super();
+ }
+
+ @PostConstruct
+ public void init() {
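+ // Build the search builders after dependency injection has completed, rather than in the constructor.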
+ AllFieldsSearch = createSearchBuilder();
+ AllFieldsSearch.and("id", AllFieldsSearch.entity().getId(), SearchCriteria.Op.EQ);
+ AllFieldsSearch.and("initialOwner", AllFieldsSearch.entity().getInitialOwner(), SearchCriteria.Op.EQ);
+ AllFieldsSearch.and("futureOwner", AllFieldsSearch.entity().getFutureOwner(), SearchCriteria.Op.EQ);
+ AllFieldsSearch.and("state", AllFieldsSearch.entity().getState(), SearchCriteria.Op.EQ);
+ AllFieldsSearch.done();
+
+ IntermediateStateSearch = createSearchBuilder();
+ IntermediateStateSearch.and("futureOwner", IntermediateStateSearch.entity().getFutureOwner(), SearchCriteria.Op.EQ);
+ IntermediateStateSearch.and("initialOwner", IntermediateStateSearch.entity().getInitialOwner(), SearchCriteria.Op.EQ);
+ IntermediateStateSearch.and("state", IntermediateStateSearch.entity().getState(), SearchCriteria.Op.IN);
+ IntermediateStateSearch.done();
+
+ ActiveSearch = createSearchBuilder();
+ ActiveSearch.and("created", ActiveSearch.entity().getCreated(), SearchCriteria.Op.GT);
+ ActiveSearch.and("id", ActiveSearch.entity().getId(), SearchCriteria.Op.EQ);
+ ActiveSearch.and("state", ActiveSearch.entity().getState(), SearchCriteria.Op.EQ);
+ ActiveSearch.done();
+
}
@Override
diff --git a/server/src/com/cloud/configuration/Config.java b/server/src/com/cloud/configuration/Config.java
index eb6fb242983..8a75a96845a 100755
--- a/server/src/com/cloud/configuration/Config.java
+++ b/server/src/com/cloud/configuration/Config.java
@@ -20,6 +20,8 @@ import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
+import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
+
import com.cloud.agent.AgentManager;
import com.cloud.consoleproxy.ConsoleProxyManager;
import com.cloud.ha.HighAvailabilityManager;
@@ -28,7 +30,6 @@ import com.cloud.network.NetworkManager;
import com.cloud.network.router.VpcVirtualNetworkApplianceManager;
import com.cloud.server.ManagementServer;
import com.cloud.storage.StorageManager;
-import com.cloud.storage.allocator.StoragePoolAllocator;
import com.cloud.storage.secondary.SecondaryStorageVmManager;
import com.cloud.storage.snapshot.SnapshotManager;
import com.cloud.template.TemplateManager;
@@ -251,11 +252,10 @@ public enum Config {
XenGuestNetwork("Hidden", ManagementServer.class, String.class, "xen.guest.network.device", null, "Specify for guest network name label", null),
XenMaxNics("Advanced", AgentManager.class, Integer.class, "xen.nics.max", "7", "Maximum allowed nics for Vms created on Xen", null),
// VMware
- VmwarePrivateNetworkVSwitch("Hidden", ManagementServer.class, String.class, "vmware.private.vswitch", null, "Specify the vSwitch on host for private network", null),
- VmwarePublicNetworkVSwitch("Hidden", ManagementServer.class, String.class, "vmware.public.vswitch", null, "Specify the vSwitch on host for public network", null),
- VmwareGuestNetworkVSwitch("Hidden", ManagementServer.class, String.class, "vmware.guest.vswitch", null, "Specify the vSwitch on host for guest network", null),
VmwareUseNexusVSwitch("Network", ManagementServer.class, Boolean.class, "vmware.use.nexus.vswitch", "false", "Enable/Disable Cisco Nexus 1000v vSwitch in VMware environment", null),
- VmwareCreateFullClone("Advanced", ManagementServer.class, Boolean.class, "vmware.create.full.clone", "false", "If set to true, creates guest VMs as full clones on ESX", null),
+ VmwareUseDVSwitch("Network", ManagementServer.class, Boolean.class, "vmware.use.dvswitch", "false", "Enable/Disable Nexus/Vmware dvSwitch in VMware environment", null),
+ VmwarePortsPerDVPortGroup("Network", ManagementServer.class, Integer.class, "vmware.ports.per.dvportgroup", "256", "Default number of ports per Vmware dvPortGroup in VMware environment", null),
+ VmwareCreateFullClone("Advanced", ManagementServer.class, Boolean.class, "vmware.create.full.clone", "false", "If set to true, creates guest VMs as full clones on ESX", null),
VmwareServiceConsole("Advanced", ManagementServer.class, String.class, "vmware.service.console", "Service Console", "Specify the service console network name(for ESX hosts)", null),
VmwareManagementPortGroup("Advanced", ManagementServer.class, String.class, "vmware.management.portgroup", "Management Network", "Specify the management network name(for ESXi hosts)", null),
VmwareAdditionalVncPortRangeStart("Advanced", ManagementServer.class, Integer.class, "vmware.additional.vnc.portrange.start", "50000", "Start port number of additional VNC port range", null),
diff --git a/server/src/com/cloud/deploy/FirstFitPlanner.java b/server/src/com/cloud/deploy/FirstFitPlanner.java
index 4933467bd8f..187ceab25dc 100755
--- a/server/src/com/cloud/deploy/FirstFitPlanner.java
+++ b/server/src/com/cloud/deploy/FirstFitPlanner.java
@@ -28,17 +28,23 @@ import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
-import com.cloud.dc.*;
+import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import org.apache.log4j.Logger;
import com.cloud.agent.manager.allocator.HostAllocator;
-import com.cloud.api.ApiDBUtils;
import com.cloud.capacity.Capacity;
import com.cloud.capacity.CapacityManager;
import com.cloud.capacity.CapacityVO;
import com.cloud.capacity.dao.CapacityDao;
import com.cloud.configuration.Config;
import com.cloud.configuration.dao.ConfigurationDao;
+import com.cloud.dc.ClusterDetailsDao;
+import com.cloud.dc.ClusterDetailsVO;
+import com.cloud.dc.ClusterVO;
+import com.cloud.dc.DataCenter;
+import com.cloud.dc.DataCenterVO;
+import com.cloud.dc.HostPodVO;
+import com.cloud.dc.Pod;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.dc.dao.HostPodDao;
@@ -58,7 +64,6 @@ import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeVO;
-import com.cloud.storage.allocator.StoragePoolAllocator;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.GuestOSCategoryDao;
import com.cloud.storage.dao.GuestOSDao;
diff --git a/server/src/com/cloud/event/ActionEventUtils.java b/server/src/com/cloud/event/ActionEventUtils.java
index 22589f1a292..3f3ca685f73 100755
--- a/server/src/com/cloud/event/ActionEventUtils.java
+++ b/server/src/com/cloud/event/ActionEventUtils.java
@@ -26,22 +26,23 @@ import com.cloud.user.UserContext;
import com.cloud.user.dao.AccountDao;
import com.cloud.user.dao.UserDao;
import com.cloud.utils.component.AnnotationInterceptor;
+import com.cloud.utils.component.ComponentContext;
import net.sf.cglib.proxy.Callback;
import net.sf.cglib.proxy.MethodInterceptor;
import net.sf.cglib.proxy.MethodProxy;
import org.apache.cloudstack.framework.events.EventBus;
import org.apache.cloudstack.framework.events.EventBusException;
import org.apache.log4j.Logger;
+import org.springframework.beans.factory.NoSuchBeanDefinitionException;
import org.springframework.stereotype.Component;
+import javax.annotation.PostConstruct;
+import javax.inject.Inject;
import java.lang.reflect.AnnotatedElement;
import java.lang.reflect.Method;
import java.util.HashMap;
import java.util.Map;
-import javax.annotation.PostConstruct;
-import javax.inject.Inject;
-
@Component
public class ActionEventUtils {
private static final Logger s_logger = Logger.getLogger(ActionEventUtils.class);
@@ -49,14 +50,12 @@ public class ActionEventUtils {
private static EventDao _eventDao;
private static AccountDao _accountDao;
protected static UserDao _userDao;
-
- // get the event bus provider if configured
- protected static EventBus _eventBus;
+ protected static EventBus _eventBus = null;
@Inject EventDao eventDao;
@Inject AccountDao accountDao;
@Inject UserDao userDao;
-
+
public ActionEventUtils() {
}
@@ -65,8 +64,6 @@ public class ActionEventUtils {
_eventDao = eventDao;
_accountDao = accountDao;
_userDao = userDao;
-
- // TODO we will do injection of event bus later
}
public static Long onActionEvent(Long userId, Long accountId, Long domainId, String type, String description) {
@@ -156,7 +153,9 @@ public class ActionEventUtils {
private static void publishOnEventBus(long userId, long accountId, String eventCategory,
String eventType, Event.State state) {
- if (_eventBus == null) {
+ try {
+ _eventBus = ComponentContext.getComponent(EventBus.class);
+ } catch(NoSuchBeanDefinitionException nbe) {
return; // no provider is configured to provide events bus, so just return
}
diff --git a/server/src/com/cloud/event/AlertGenerator.java b/server/src/com/cloud/event/AlertGenerator.java
index 2dc7f3eb9e1..c56f9177af2 100644
--- a/server/src/com/cloud/event/AlertGenerator.java
+++ b/server/src/com/cloud/event/AlertGenerator.java
@@ -22,16 +22,17 @@ import com.cloud.dc.HostPodVO;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.dc.dao.HostPodDao;
import com.cloud.server.ManagementServer;
-import org.apache.cloudstack.framework.events.*;
+import com.cloud.utils.component.ComponentContext;
+import org.apache.cloudstack.framework.events.EventBus;
+import org.apache.cloudstack.framework.events.EventBusException;
import org.apache.log4j.Logger;
+import org.springframework.beans.factory.NoSuchBeanDefinitionException;
import org.springframework.stereotype.Component;
-import java.util.Enumeration;
-import java.util.HashMap;
-import java.util.Map;
-
import javax.annotation.PostConstruct;
import javax.inject.Inject;
+import java.util.HashMap;
+import java.util.Map;
@Component
public class AlertGenerator {
@@ -39,13 +40,11 @@ public class AlertGenerator {
private static final Logger s_logger = Logger.getLogger(AlertGenerator.class);
private static DataCenterDao _dcDao;
private static HostPodDao _podDao;
-
- // get the event bus provider if configured
protected static EventBus _eventBus = null;
@Inject DataCenterDao dcDao;
@Inject HostPodDao podDao;
-
+
public AlertGenerator() {
}
@@ -56,8 +55,10 @@ public class AlertGenerator {
}
public static void publishAlertOnEventBus(String alertType, long dataCenterId, Long podId, String subject, String body) {
- if (_eventBus == null) {
- return; // no provider is configured to provider events bus, so just return
+ try {
+ _eventBus = ComponentContext.getComponent(EventBus.class);
+ } catch(NoSuchBeanDefinitionException nbe) {
+ return; // no provider is configured to provide events bus, so just return
}
org.apache.cloudstack.framework.events.Event event =
diff --git a/server/src/com/cloud/event/UsageEventUtils.java b/server/src/com/cloud/event/UsageEventUtils.java
index d59262af2ba..54012443848 100644
--- a/server/src/com/cloud/event/UsageEventUtils.java
+++ b/server/src/com/cloud/event/UsageEventUtils.java
@@ -23,17 +23,18 @@ import com.cloud.event.dao.UsageEventDao;
import com.cloud.server.ManagementServer;
import com.cloud.user.Account;
import com.cloud.user.dao.AccountDao;
-import org.apache.cloudstack.framework.events.EventBus;
+import com.cloud.utils.component.ComponentContext;
import org.apache.cloudstack.framework.events.Event;
+import org.apache.cloudstack.framework.events.EventBus;
import org.apache.cloudstack.framework.events.EventBusException;
import org.apache.log4j.Logger;
+import org.springframework.beans.factory.NoSuchBeanDefinitionException;
import org.springframework.stereotype.Component;
-import java.util.HashMap;
-import java.util.Map;
-
import javax.annotation.PostConstruct;
import javax.inject.Inject;
+import java.util.HashMap;
+import java.util.Map;
@Component
public class UsageEventUtils {
@@ -42,14 +43,12 @@ public class UsageEventUtils {
private static AccountDao _accountDao;
private static DataCenterDao _dcDao;
private static final Logger s_logger = Logger.getLogger(UsageEventUtils.class);
-
- // get the event bus provider if configured
- protected static EventBus _eventBus;
+ protected static EventBus _eventBus = null;
@Inject UsageEventDao usageEventDao;
@Inject AccountDao accountDao;
@Inject DataCenterDao dcDao;
-
+
public UsageEventUtils() {
}
@@ -116,8 +115,10 @@ public class UsageEventUtils {
private static void publishUsageEvent(String usageEventType, Long accountId, Long zoneId, String resourceType, String resourceUUID) {
- if (_eventBus == null) {
- return; // no provider is configured to provider events bus, so just return
+ try {
+ _eventBus = ComponentContext.getComponent(EventBus.class);
+ } catch(NoSuchBeanDefinitionException nbe) {
+ return; // no provider is configured to provide events bus, so just return
}
Account account = _accountDao.findById(accountId);
diff --git a/server/src/com/cloud/host/dao/HostDaoImpl.java b/server/src/com/cloud/host/dao/HostDaoImpl.java
index 697c3dc3826..07a42322ce3 100755
--- a/server/src/com/cloud/host/dao/HostDaoImpl.java
+++ b/server/src/com/cloud/host/dao/HostDaoImpl.java
@@ -128,6 +128,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao
@Inject protected ClusterDao _clusterDao;
public HostDaoImpl() {
+ super();
}
@PostConstruct
@@ -261,7 +262,11 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao
* UnmanagedDirectConnectSearch.and("lastPinged", UnmanagedDirectConnectSearch.entity().getLastPinged(),
* SearchCriteria.Op.LTEQ); UnmanagedDirectConnectSearch.cp(); UnmanagedDirectConnectSearch.cp();
*/
+ try {
HostTransferSearch = _hostTransferDao.createSearchBuilder();
+ } catch (Throwable e) {
+ s_logger.debug("error", e);
+ }
HostTransferSearch.and("id", HostTransferSearch.entity().getId(), SearchCriteria.Op.NULL);
UnmanagedDirectConnectSearch.join("hostTransferSearch", HostTransferSearch, HostTransferSearch.entity().getId(), UnmanagedDirectConnectSearch.entity().getId(), JoinType.LEFTOUTER);
ClusterManagedSearch = _clusterDao.createSearchBuilder();
diff --git a/server/src/com/cloud/network/NetworkManagerImpl.java b/server/src/com/cloud/network/NetworkManagerImpl.java
index 82893c4cafc..f527b73d481 100755
--- a/server/src/com/cloud/network/NetworkManagerImpl.java
+++ b/server/src/com/cloud/network/NetworkManagerImpl.java
@@ -355,9 +355,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L
_firewallMgr.addSystemFirewallRules(addr, owner);
}
- long macAddress = NetUtils.createSequenceBasedMacAddress(addr.getMacAddress());
-
- return new PublicIp(addr, _vlanDao.findById(addr.getVlanId()), macAddress);
+ return PublicIp.createFromAddrAndVlan(addr, _vlanDao.findById(addr.getVlanId()));
}
@DB
@@ -404,8 +402,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L
PublicIp ipToReturn = null;
if (sourceNatIp != null) {
- ipToReturn = new PublicIp(sourceNatIp, _vlanDao.findById(sourceNatIp.getVlanId()),
- NetUtils.createSequenceBasedMacAddress(sourceNatIp.getMacAddress()));
+ ipToReturn = PublicIp.createFromAddrAndVlan(sourceNatIp, _vlanDao.findById(sourceNatIp.getVlanId()));
} else {
ipToReturn = assignDedicateIpAddress(owner, guestNetwork.getId(), null, dcId, true);
}
@@ -490,8 +487,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L
List<PublicIp> publicIps = new ArrayList<PublicIp>();
if (userIps != null && !userIps.isEmpty()) {
for (IPAddressVO userIp : userIps) {
- PublicIp publicIp = new PublicIp(userIp, _vlanDao.findById(userIp.getVlanId()),
- NetUtils.createSequenceBasedMacAddress(userIp.getMacAddress()));
+ PublicIp publicIp = PublicIp.createFromAddrAndVlan(userIp, _vlanDao.findById(userIp.getVlanId()));
publicIps.add(publicIp);
}
}
@@ -2311,7 +2307,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L
List<IPAddressVO> userIps = _ipAddressDao.listByAssociatedNetwork(network.getId(), null);
if (userIps != null && !userIps.isEmpty()) {
for (IPAddressVO userIp : userIps) {
- PublicIp publicIp = new PublicIp(userIp, _vlanDao.findById(userIp.getVlanId()), NetUtils.createSequenceBasedMacAddress(userIp.getMacAddress()));
+ PublicIp publicIp = PublicIp.createFromAddrAndVlan(userIp, _vlanDao.findById(userIp.getVlanId()));
publicIps.add(publicIp);
}
}
@@ -2847,7 +2843,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L
List<PublicIp> publicIps = new ArrayList<PublicIp>();
if (userIps != null && !userIps.isEmpty()) {
for (IPAddressVO userIp : userIps) {
- PublicIp publicIp = new PublicIp(userIp, _vlanDao.findById(userIp.getVlanId()), NetUtils.createSequenceBasedMacAddress(userIp.getMacAddress()));
+ PublicIp publicIp = PublicIp.createFromAddrAndVlan(userIp, _vlanDao.findById(userIp.getVlanId()));
publicIps.add(publicIp);
}
}
@@ -2877,7 +2873,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L
IPAddressVO ip = _ipAddressDao.findByIdIncludingRemoved(staticNat.getSourceIpAddressId());
// ip can't be null, otherwise something wrong happened
ip.setAssociatedWithVmId(null);
- publicIp = new PublicIp(ip, _vlanDao.findById(ip.getVlanId()), NetUtils.createSequenceBasedMacAddress(ip.getMacAddress()));
+ publicIp = PublicIp.createFromAddrAndVlan(ip, _vlanDao.findById(ip.getVlanId()));
publicIps.add(publicIp);
break;
}
@@ -3154,7 +3150,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L
if (userIps != null && !userIps.isEmpty()) {
for (IPAddressVO userIp : userIps) {
userIp.setState(State.Releasing);
- PublicIp publicIp = new PublicIp(userIp, _vlanDao.findById(userIp.getVlanId()), NetUtils.createSequenceBasedMacAddress(userIp.getMacAddress()));
+ PublicIp publicIp = PublicIp.createFromAddrAndVlan(userIp, _vlanDao.findById(userIp.getVlanId()));
publicIpsToRelease.add(publicIp);
}
}
diff --git a/server/src/com/cloud/network/NetworkModelImpl.java b/server/src/com/cloud/network/NetworkModelImpl.java
index 7b3717a161f..ac1bc874f9c 100644
--- a/server/src/com/cloud/network/NetworkModelImpl.java
+++ b/server/src/com/cloud/network/NetworkModelImpl.java
@@ -709,7 +709,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel {
return null;
}
- return new PublicIp(addr, _vlanDao.findById(addr.getVlanId()), NetUtils.createSequenceBasedMacAddress(addr.getMacAddress()));
+ return PublicIp.createFromAddrAndVlan(addr, _vlanDao.findById(addr.getVlanId()));
}
@Override
@@ -1405,7 +1405,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel {
return true;
}
IPAddressVO ipVO = _ipAddressDao.findById(userIp.getId());
- PublicIp publicIp = new PublicIp(ipVO, _vlanDao.findById(userIp.getVlanId()), NetUtils.createSequenceBasedMacAddress(ipVO.getMacAddress()));
+ PublicIp publicIp = PublicIp.createFromAddrAndVlan(ipVO, _vlanDao.findById(userIp.getVlanId()));
if (!canIpUsedForService(publicIp, service, networkId)) {
return false;
}
@@ -1884,8 +1884,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel {
for (IpAddress addr : addrs) {
if (addr.isSourceNat()) {
sourceNatIp = _ipAddressDao.findById(addr.getId());
- return new PublicIp(sourceNatIp, _vlanDao.findById(sourceNatIp.getVlanId()),
- NetUtils.createSequenceBasedMacAddress(sourceNatIp.getMacAddress()));
+ return PublicIp.createFromAddrAndVlan(sourceNatIp, _vlanDao.findById(sourceNatIp.getVlanId()));
}
}
@@ -1930,9 +1929,9 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel {
}
int cidrSize = NetUtils.getIp6CidrSize(ip6Cidr);
- // Ipv6 cidr limit should be at least /64
- if (cidrSize < 64) {
- throw new InvalidParameterValueException("The cidr size of IPv6 network must be no less than 64 bits!");
+ // we only support cidr == 64
+ if (cidrSize != 64) {
+ throw new InvalidParameterValueException("The cidr size of IPv6 network must be 64 bits!");
}
}
diff --git a/server/src/com/cloud/network/NetworkServiceImpl.java b/server/src/com/cloud/network/NetworkServiceImpl.java
index ce527b7dbe7..1708224b47e 100755
--- a/server/src/com/cloud/network/NetworkServiceImpl.java
+++ b/server/src/com/cloud/network/NetworkServiceImpl.java
@@ -1895,7 +1895,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService {
List<PublicIp> publicIps = new ArrayList<PublicIp>();
if (userIps != null && !userIps.isEmpty()) {
for (IPAddressVO userIp : userIps) {
- PublicIp publicIp = new PublicIp(userIp, _vlanDao.findById(userIp.getVlanId()), NetUtils.createSequenceBasedMacAddress(userIp.getMacAddress()));
+ PublicIp publicIp = PublicIp.createFromAddrAndVlan(userIp, _vlanDao.findById(userIp.getVlanId()));
publicIps.add(publicIp);
}
}
diff --git a/server/src/com/cloud/network/NetworkStateListener.java b/server/src/com/cloud/network/NetworkStateListener.java
index bafe6d2d1f9..038e76988bf 100644
--- a/server/src/com/cloud/network/NetworkStateListener.java
+++ b/server/src/com/cloud/network/NetworkStateListener.java
@@ -23,24 +23,23 @@ import com.cloud.network.Network.Event;
import com.cloud.network.Network.State;
import com.cloud.network.dao.NetworkDao;
import com.cloud.server.ManagementServer;
+import com.cloud.utils.component.ComponentContext;
import com.cloud.utils.fsm.StateListener;
import org.apache.cloudstack.framework.events.EventBus;
import org.apache.cloudstack.framework.events.EventBusException;
import org.apache.log4j.Logger;
-
-import java.util.Enumeration;
-import java.util.HashMap;
-import java.util.Map;
+import org.springframework.beans.factory.NoSuchBeanDefinitionException;
import javax.inject.Inject;
+import java.util.HashMap;
+import java.util.Map;
public class NetworkStateListener implements StateListener<State, Event, Network> {
@Inject protected UsageEventDao _usageEventDao;
@Inject protected NetworkDao _networkDao;
- // get the event bus provider if configured
- @Inject protected EventBus _eventBus;
+ protected static EventBus _eventBus = null;
private static final Logger s_logger = Logger.getLogger(NetworkStateListener.class);
@@ -63,7 +62,9 @@ public class NetworkStateListener implements StateListener ips = _ipAddressDao.listByAssociatedVpc(vpcId, false);
for (IPAddressVO ip : ips) {
- PublicIp publicIp = new PublicIp(ip, _vlanDao.findById(ip.getVlanId()),
- NetUtils.createSequenceBasedMacAddress(ip.getMacAddress()));
+ PublicIp publicIp = PublicIp.createFromAddrAndVlan(ip, _vlanDao.findById(ip.getVlanId()));
if ((ip.getState() == IpAddress.State.Allocated || ip.getState() == IpAddress.State.Allocating)
&& _vpcMgr.ipUsedInVpc(ip)&& !publicVlans.contains(publicIp.getVlanTag())) {
s_logger.debug("Allocating nic for router in vlan " + publicIp.getVlanTag());
diff --git a/server/src/com/cloud/network/vpc/VpcManagerImpl.java b/server/src/com/cloud/network/vpc/VpcManagerImpl.java
index c9c13c97199..de6707e86da 100644
--- a/server/src/com/cloud/network/vpc/VpcManagerImpl.java
+++ b/server/src/com/cloud/network/vpc/VpcManagerImpl.java
@@ -2014,8 +2014,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager{
PublicIp ipToReturn = null;
if (sourceNatIp != null) {
- ipToReturn = new PublicIp(sourceNatIp, _vlanDao.findById(sourceNatIp.getVlanId()),
- NetUtils.createSequenceBasedMacAddress(sourceNatIp.getMacAddress()));
+ ipToReturn = PublicIp.createFromAddrAndVlan(sourceNatIp, _vlanDao.findById(sourceNatIp.getVlanId()));
} else {
ipToReturn = _ntwkMgr.assignDedicateIpAddress(owner, null, vpc.getId(), dcId, true);
}
diff --git a/server/src/com/cloud/resource/ResourceManager.java b/server/src/com/cloud/resource/ResourceManager.java
index 266ba948afc..b0ab9269529 100755
--- a/server/src/com/cloud/resource/ResourceManager.java
+++ b/server/src/com/cloud/resource/ResourceManager.java
@@ -100,6 +100,7 @@ public interface ResourceManager extends ResourceService{
public List<HostVO> listHostsInClusterByStatus(long clusterId, Status status);
public List<HostVO> listAllUpAndEnabledHostsInOneZoneByType(Host.Type type, long dcId);
+ public List<HostVO> listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType type, long dcId);
public List<HostVO> listAllHostsInOneZoneByType(Host.Type type, long dcId);
diff --git a/server/src/com/cloud/resource/ResourceManagerImpl.java b/server/src/com/cloud/resource/ResourceManagerImpl.java
index 14628c1fe8d..a4a08aa4046 100755
--- a/server/src/com/cloud/resource/ResourceManagerImpl.java
+++ b/server/src/com/cloud/resource/ResourceManagerImpl.java
@@ -455,6 +455,11 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
+ cmd.getHypervisor());
}
+ if (hypervisorType == HypervisorType.VMware) {
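+ // Hand every URL parameter of the add-cluster request through to the VMware discoverer.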
+ Map<String, String> allParams = cmd.getFullUrlParams();
+ discoverer.putParam(allParams);
+ }
+
List<ClusterVO> result = new ArrayList<ClusterVO>();
long clusterId = 0;
@@ -2822,4 +2827,17 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
}
return pcs;
}
+
+ @Override
+ public List<HostVO> listAllUpAndEnabledHostsInOneZoneByHypervisor(
+ HypervisorType type, long dcId) {
+ SearchCriteriaService<HostVO, HostVO> sc = SearchCriteria2
+ .create(HostVO.class);
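+ // Restrict the search to hosts of the requested hypervisor, in the given zone, that are Up and Enabled.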
+ sc.addAnd(sc.getEntity().getHypervisorType(), Op.EQ, type);
+ sc.addAnd(sc.getEntity().getDataCenterId(), Op.EQ, dcId);
+ sc.addAnd(sc.getEntity().getStatus(), Op.EQ, Status.Up);
+ sc.addAnd(sc.getEntity().getResourceState(), Op.EQ,
+ ResourceState.Enabled);
+ return sc.list();
+ }
}
diff --git a/server/src/com/cloud/storage/StorageManagerImpl.java b/server/src/com/cloud/storage/StorageManagerImpl.java
index 9daf77db38a..f51079240c9 100755
--- a/server/src/com/cloud/storage/StorageManagerImpl.java
+++ b/server/src/com/cloud/storage/StorageManagerImpl.java
@@ -58,6 +58,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory;
+import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService.VolumeApiResult;
@@ -69,7 +70,6 @@ import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.agent.AgentManager;
-
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.BackupSnapshotCommand;
import com.cloud.agent.api.CleanupSnapshotBackupCommand;
@@ -78,7 +78,6 @@ import com.cloud.agent.api.ManageSnapshotCommand;
import com.cloud.agent.api.StoragePoolInfo;
import com.cloud.agent.api.storage.DeleteTemplateCommand;
import com.cloud.agent.api.storage.DeleteVolumeCommand;
-
import com.cloud.agent.manager.Commands;
import com.cloud.alert.AlertManager;
import com.cloud.api.ApiDBUtils;
@@ -100,10 +99,10 @@ import com.cloud.dc.HostPodVO;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.dc.dao.HostPodDao;
+import com.cloud.deploy.DataCenterDeployment;
+import com.cloud.deploy.DeploymentPlanner.ExcludeList;
import com.cloud.domain.dao.DomainDao;
-
import com.cloud.event.dao.EventDao;
-import com.cloud.event.dao.UsageEventDao;
import com.cloud.exception.AgentUnavailableException;
import com.cloud.exception.ConnectionException;
import com.cloud.exception.InsufficientCapacityException;
@@ -113,7 +112,6 @@ import com.cloud.exception.PermissionDeniedException;
import com.cloud.exception.ResourceInUseException;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.exception.StorageUnavailableException;
-
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.Status;
@@ -132,8 +130,6 @@ import com.cloud.service.dao.ServiceOfferingDao;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.Volume.Type;
-import com.cloud.storage.allocator.StoragePoolAllocator;
-
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.storage.dao.SnapshotPolicyDao;
@@ -146,7 +142,6 @@ import com.cloud.storage.dao.VMTemplateS3Dao;
import com.cloud.storage.dao.VMTemplateSwiftDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.dao.VolumeHostDao;
-
import com.cloud.storage.download.DownloadMonitor;
import com.cloud.storage.listener.StoragePoolMonitor;
import com.cloud.storage.listener.VolumeStateListener;
@@ -156,7 +151,11 @@ import com.cloud.storage.snapshot.SnapshotManager;
import com.cloud.storage.snapshot.SnapshotScheduler;
import com.cloud.tags.dao.ResourceTagDao;
import com.cloud.template.TemplateManager;
-import com.cloud.user.*;
+import com.cloud.user.Account;
+import com.cloud.user.AccountManager;
+import com.cloud.user.ResourceLimitService;
+import com.cloud.user.User;
+import com.cloud.user.UserContext;
import com.cloud.user.dao.AccountDao;
import com.cloud.user.dao.UserDao;
import com.cloud.utils.NumbersUtil;
@@ -165,20 +164,28 @@ import com.cloud.utils.UriUtils;
import com.cloud.utils.component.ComponentContext;
import com.cloud.utils.component.ManagerBase;
import com.cloud.utils.concurrency.NamedThreadFactory;
-import com.cloud.utils.db.*;
+import com.cloud.utils.db.DB;
+import com.cloud.utils.db.GenericSearchBuilder;
+import com.cloud.utils.db.GlobalLock;
+import com.cloud.utils.db.JoinBuilder;
import com.cloud.utils.db.JoinBuilder.JoinType;
+import com.cloud.utils.db.SearchBuilder;
+import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Op;
+import com.cloud.utils.db.Transaction;
import com.cloud.utils.exception.CloudRuntimeException;
-
import com.cloud.vm.DiskProfile;
import com.cloud.vm.UserVmManager;
import com.cloud.vm.VMInstanceVO;
+import com.cloud.vm.VirtualMachine.State;
import com.cloud.vm.VirtualMachineManager;
import com.cloud.vm.VirtualMachineProfile;
import com.cloud.vm.VirtualMachineProfileImpl;
-
-import com.cloud.vm.VirtualMachine.State;
-import com.cloud.vm.dao.*;
+import com.cloud.vm.dao.ConsoleProxyDao;
+import com.cloud.vm.dao.DomainRouterDao;
+import com.cloud.vm.dao.SecondaryStorageVmDao;
+import com.cloud.vm.dao.UserVmDao;
+import com.cloud.vm.dao.VMInstanceDao;
@Component
@Local(value = { StorageManager.class, StorageService.class })
@@ -193,24 +200,10 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
@Inject
protected TemplateManager _tmpltMgr;
@Inject
- protected AsyncJobManager _asyncMgr;
- @Inject
- protected SnapshotManager _snapshotMgr;
- @Inject
- protected SnapshotScheduler _snapshotScheduler;
- @Inject
protected AccountManager _accountMgr;
@Inject
protected ConfigurationManager _configMgr;
@Inject
- protected ConsoleProxyManager _consoleProxyMgr;
- @Inject
- protected SecondaryStorageVmManager _secStorageMgr;
- @Inject
- protected NetworkModel _networkMgr;
- @Inject
- protected ServiceOfferingDao _serviceOfferingDao;
- @Inject
protected VolumeDao _volsDao;
@Inject
protected HostDao _hostDao;
@@ -275,30 +268,14 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
@Inject
protected ClusterDao _clusterDao;
@Inject
- protected VirtualMachineManager _vmMgr;
- @Inject
- protected DomainRouterDao _domrDao;
- @Inject
- protected SecondaryStorageVmDao _secStrgDao;
- @Inject
protected StoragePoolWorkDao _storagePoolWorkDao;
@Inject
protected HypervisorGuruManager _hvGuruMgr;
@Inject
protected VolumeDao _volumeDao;
@Inject
- protected OCFS2Manager _ocfs2Mgr;
- @Inject
- protected ResourceLimitService _resourceLimitMgr;
- @Inject
protected SecondaryStorageVmManager _ssvmMgr;
@Inject
- protected ResourceManager _resourceMgr;
- @Inject
- protected DownloadMonitor _downloadMonitor;
- @Inject
- protected ResourceTagDao _resourceTagDao;
- @Inject
protected List<StoragePoolAllocator> _storagePoolAllocators;
@Inject
ConfigurationDao _configDao;
@@ -464,14 +441,19 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
VMInstanceVO vm, final Set<StoragePool> avoid) {
VirtualMachineProfile<VMInstanceVO> profile = new VirtualMachineProfileImpl<VMInstanceVO>(
- vm);
+ vm);
for (StoragePoolAllocator allocator : _storagePoolAllocators) {
- final List<StoragePool> poolList = allocator.allocateToPool(
- dskCh, profile, dc.getId(), pod.getId(), clusterId, hostId,
- avoid, 1);
- if (poolList != null && !poolList.isEmpty()) {
- return (StoragePool)this.dataStoreMgr.getDataStore(poolList.get(0).getId(), DataStoreRole.Primary);
- }
+
+ ExcludeList avoidList = new ExcludeList();
+ for(StoragePool pool : avoid){
+ avoidList.addPool(pool.getId());
+ }
+ DataCenterDeployment plan = new DataCenterDeployment(dc.getId(), pod.getId(), clusterId, hostId, null, null);
+
+ final List<StoragePool> poolList = allocator.allocateToPool(dskCh, profile, plan, avoidList, 1);
+ if (poolList != null && !poolList.isEmpty()) {
+ return (StoragePool)this.dataStoreMgr.getDataStore(poolList.get(0).getId(), DataStoreRole.Primary);
+ }
}
return null;
}
@@ -786,7 +768,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
String scope = cmd.getScope();
if (scope != null) {
try {
- scopeType = Enum.valueOf(ScopeType.class, scope);
+ scopeType = Enum.valueOf(ScopeType.class, scope.toUpperCase());
} catch (Exception e) {
throw new InvalidParameterValueException("invalid scope"
+ scope);
diff --git a/server/src/com/cloud/storage/VolumeManagerImpl.java b/server/src/com/cloud/storage/VolumeManagerImpl.java
index a69607f1f3f..336dbcbf336 100644
--- a/server/src/com/cloud/storage/VolumeManagerImpl.java
+++ b/server/src/com/cloud/storage/VolumeManagerImpl.java
@@ -51,6 +51,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
@@ -115,7 +116,6 @@ import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.Volume.Event;
import com.cloud.storage.Volume.Type;
-import com.cloud.storage.allocator.StoragePoolAllocator;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.storage.dao.SnapshotPolicyDao;
diff --git a/server/src/com/cloud/storage/allocator/AbstractStoragePoolAllocator.java b/server/src/com/cloud/storage/allocator/AbstractStoragePoolAllocator.java
deleted file mode 100755
index d747d25c7b5..00000000000
--- a/server/src/com/cloud/storage/allocator/AbstractStoragePoolAllocator.java
+++ /dev/null
@@ -1,209 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-package com.cloud.storage.allocator;
-
-import java.math.BigDecimal;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-
-import javax.inject.Inject;
-import javax.naming.ConfigurationException;
-
-import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
-import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
-import org.apache.log4j.Logger;
-
-import com.cloud.capacity.CapacityManager;
-import com.cloud.configuration.dao.ConfigurationDao;
-import com.cloud.dc.ClusterVO;
-import com.cloud.dc.dao.ClusterDao;
-import com.cloud.deploy.DataCenterDeployment;
-import com.cloud.deploy.DeploymentPlan;
-import com.cloud.deploy.DeploymentPlanner.ExcludeList;
-import com.cloud.host.Host;
-import com.cloud.server.StatsCollector;
-import com.cloud.storage.Storage.StoragePoolType;
-import com.cloud.storage.StorageManager;
-import com.cloud.storage.StoragePool;
-import com.cloud.storage.StoragePoolStatus;
-import com.cloud.storage.VMTemplateStoragePoolVO;
-import com.cloud.storage.VMTemplateStorageResourceAssoc;
-import com.cloud.storage.VMTemplateStorageResourceAssoc.Status;
-import com.cloud.storage.VMTemplateVO;
-import com.cloud.storage.Volume;
-import com.cloud.storage.Volume.Type;
-import com.cloud.storage.dao.StoragePoolDao;
-import com.cloud.storage.dao.StoragePoolHostDao;
-import com.cloud.storage.dao.VMTemplateDao;
-import com.cloud.storage.dao.VMTemplateHostDao;
-import com.cloud.storage.dao.VMTemplatePoolDao;
-import com.cloud.storage.dao.VolumeDao;
-import com.cloud.storage.swift.SwiftManager;
-import com.cloud.template.TemplateManager;
-import com.cloud.utils.NumbersUtil;
-import com.cloud.utils.component.AdapterBase;
-import com.cloud.vm.DiskProfile;
-import com.cloud.vm.VirtualMachine;
-import com.cloud.vm.VirtualMachineProfile;
-
-public abstract class AbstractStoragePoolAllocator extends AdapterBase implements StoragePoolAllocator {
- private static final Logger s_logger = Logger.getLogger(AbstractStoragePoolAllocator.class);
- @Inject TemplateManager _tmpltMgr;
- @Inject StorageManager _storageMgr;
- @Inject StoragePoolDao _storagePoolDao;
- @Inject VMTemplateHostDao _templateHostDao;
- @Inject VMTemplatePoolDao _templatePoolDao;
- @Inject VMTemplateDao _templateDao;
- @Inject VolumeDao _volumeDao;
- @Inject StoragePoolHostDao _poolHostDao;
- @Inject ConfigurationDao _configDao;
- @Inject ClusterDao _clusterDao;
- @Inject SwiftManager _swiftMgr;
- @Inject CapacityManager _capacityMgr;
- @Inject DataStoreManager dataStoreMgr;
- protected BigDecimal _storageOverprovisioningFactor = new BigDecimal(1);
- long _extraBytesPerVolume = 0;
- Random _rand;
- boolean _dontMatter;
-
- @Override
- public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
- super.configure(name, params);
-
- Map<String, String> configs = _configDao.getConfiguration(null, params);
-
- String globalStorageOverprovisioningFactor = configs.get("storage.overprovisioning.factor");
- _storageOverprovisioningFactor = new BigDecimal(NumbersUtil.parseFloat(globalStorageOverprovisioningFactor, 2.0f));
-
- _extraBytesPerVolume = 0;
-
- _rand = new Random(System.currentTimeMillis());
-
- _dontMatter = Boolean.parseBoolean(configs.get("storage.overwrite.provisioning"));
-
- return true;
- }
-
- abstract boolean allocatorIsCorrectType(DiskProfile dskCh);
-
- protected boolean templateAvailable(long templateId, long poolId) {
- VMTemplateStorageResourceAssoc thvo = _templatePoolDao.findByPoolTemplate(poolId, templateId);
- if (thvo != null) {
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("Template id : " + templateId + " status : " + thvo.getDownloadState().toString());
- }
- return (thvo.getDownloadState()==Status.DOWNLOADED);
- } else {
- return false;
- }
- }
-
- protected boolean localStorageAllocationNeeded(DiskProfile dskCh) {
- return dskCh.useLocalStorage();
- }
-
- protected boolean poolIsCorrectType(DiskProfile dskCh, StoragePool pool) {
- boolean localStorageAllocationNeeded = localStorageAllocationNeeded(dskCh);
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("Is localStorageAllocationNeeded? "+ localStorageAllocationNeeded);
- s_logger.debug("Is storage pool shared? "+ pool.isShared());
- }
-
- return ((!localStorageAllocationNeeded && pool.getPoolType().isShared()) || (localStorageAllocationNeeded && !pool.getPoolType().isShared()));
- }
-
- protected boolean checkPool(ExcludeList avoid, StoragePoolVO pool, DiskProfile dskCh, VMTemplateVO template, List<VMTemplateStoragePoolVO> templatesInPool,
- StatsCollector sc, DeploymentPlan plan) {
-
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("Checking if storage pool is suitable, name: " + pool.getName()+ " ,poolId: "+ pool.getId());
- }
- StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId());
- if (avoid.shouldAvoid(pol)) {
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("StoragePool is in avoid set, skipping this pool");
- }
- return false;
- }
- if(dskCh.getType().equals(Type.ROOT) && pool.getPoolType().equals(StoragePoolType.Iscsi)){
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("Disk needed for ROOT volume, but StoragePoolType is Iscsi, skipping this and trying other available pools");
- }
- return false;
- }
-
- //by default, all pools are up when successfully added
- //don't return the pool if not up (if in maintenance/prepareformaintenance/errorinmaintenance)
- if(!pool.getStatus().equals(StoragePoolStatus.Up)){
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("StoragePool status is not UP, status is: "+pool.getStatus().name()+", skipping this pool");
- }
- return false;
- }
-
- // Check that the pool type is correct
- if (!poolIsCorrectType(dskCh, pol)) {
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("StoragePool is not of correct type, skipping this pool");
- }
- return false;
- }
-
- /*hypervisor type is correct*/
- // TODO : when creating a standalone volume, offering is passed as NULL, need to
- // refine the logic of checking hypervisorType based on offering info
- Long clusterId = pool.getClusterId();
- ClusterVO cluster = _clusterDao.findById(clusterId);
- if (!(cluster.getHypervisorType() == dskCh.getHypersorType())) {
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("StoragePool's Cluster does not have required hypervisorType, skipping this pool");
- }
- return false;
- }
-
-
- // check capacity
- Volume volume = _volumeDao.findById(dskCh.getVolumeId());
- List<Volume> requestVolumes = new ArrayList<Volume>();
- requestVolumes.add(volume);
- return _storageMgr.storagePoolHasEnoughSpace(requestVolumes, pol);
- }
-
-
-
- @Override
- public String chooseStorageIp(VirtualMachine vm, Host host, Host storage) {
- return storage.getStorageIpAddress();
- }
-
-
- @Override
- public List<StoragePool> allocateToPool(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile, long dcId, long podId, Long clusterId, Long hostId, Set<? extends StoragePool> avoids, int returnUpTo) {
-
- ExcludeList avoid = new ExcludeList();
- for(StoragePool pool : avoids){
- avoid.addPool(pool.getId());
- }
-
- DataCenterDeployment plan = new DataCenterDeployment(dcId, podId, clusterId, hostId, null, null);
- return allocateToPool(dskCh, vmProfile, plan, avoid, returnUpTo);
- }
-
-}
diff --git a/server/src/com/cloud/storage/allocator/FirstFitStoragePoolAllocator.java b/server/src/com/cloud/storage/allocator/FirstFitStoragePoolAllocator.java
deleted file mode 100644
index f0df3a6f001..00000000000
--- a/server/src/com/cloud/storage/allocator/FirstFitStoragePoolAllocator.java
+++ /dev/null
@@ -1,175 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-package com.cloud.storage.allocator;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import javax.ejb.Local;
-import javax.inject.Inject;
-import javax.naming.ConfigurationException;
-
-import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
-import org.apache.log4j.Logger;
-
-import com.cloud.deploy.DeploymentPlan;
-import com.cloud.deploy.DeploymentPlanner.ExcludeList;
-import com.cloud.offering.ServiceOffering;
-import com.cloud.server.StatsCollector;
-import com.cloud.storage.DiskOfferingVO;
-import com.cloud.storage.VMTemplateVO;
-import com.cloud.storage.Storage.StoragePoolType;
-import com.cloud.storage.StoragePool;
-import com.cloud.storage.dao.DiskOfferingDao;
-import com.cloud.user.Account;
-import com.cloud.vm.DiskProfile;
-import com.cloud.vm.VirtualMachine;
-import com.cloud.vm.VirtualMachineProfile;
-
-@Local(value=StoragePoolAllocator.class)
-public class FirstFitStoragePoolAllocator extends AbstractStoragePoolAllocator {
- private static final Logger s_logger = Logger.getLogger(FirstFitStoragePoolAllocator.class);
- protected String _allocationAlgorithm = "random";
-
- @Inject
- DiskOfferingDao _diskOfferingDao;
-
- @Override
- public boolean allocatorIsCorrectType(DiskProfile dskCh) {
- return !localStorageAllocationNeeded(dskCh);
- }
-
- @Override
- public List<StoragePool> allocateToPool(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
-
-
- VMTemplateVO template = (VMTemplateVO)vmProfile.getTemplate();
- Account account = null;
- if(vmProfile.getVirtualMachine() != null){
- account = vmProfile.getOwner();
- }
-
- List<StoragePool> suitablePools = new ArrayList<StoragePool>();
-
- // Check that the allocator type is correct
- if (!allocatorIsCorrectType(dskCh)) {
- return suitablePools;
- }
- long dcId = plan.getDataCenterId();
- Long podId = plan.getPodId();
- Long clusterId = plan.getClusterId();
-
- if(dskCh.getTags() != null && dskCh.getTags().length != 0){
- s_logger.debug("Looking for pools in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId + " having tags:" + Arrays.toString(dskCh.getTags()));
- }else{
- s_logger.debug("Looking for pools in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId);
- }
-
- List<StoragePoolVO> pools = _storagePoolDao.findPoolsByTags(dcId, podId, clusterId, dskCh.getTags(), null);
- if (pools.size() == 0) {
- if (s_logger.isDebugEnabled()) {
- String storageType = dskCh.useLocalStorage() ? ServiceOffering.StorageType.local.toString() : ServiceOffering.StorageType.shared.toString();
- s_logger.debug("No storage pools available for " + storageType + " volume allocation, returning");
- }
- return suitablePools;
- }
-
- StatsCollector sc = StatsCollector.getInstance();
-
- //FixMe: We are ignoring userdispersing algorithm when account is null. Find a way to get account ID when VMprofile is null
- if(_allocationAlgorithm.equals("random") || _allocationAlgorithm.equals("userconcentratedpod_random") || (account == null)) {
- // Shuffle this so that we don't check the pools in the same order.
- Collections.shuffle(pools);
- }else if(_allocationAlgorithm.equals("userdispersing")){
- pools = reorderPoolsByNumberOfVolumes(plan, pools, account);
- }
-
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("FirstFitStoragePoolAllocator has " + pools.size() + " pools to check for allocation");
- }
-
- DiskOfferingVO diskOffering = _diskOfferingDao.findById(dskCh.getDiskOfferingId());
- for (StoragePoolVO pool: pools) {
- if(suitablePools.size() == returnUpTo){
- break;
- }
- if (diskOffering.getSystemUse() && pool.getPoolType() == StoragePoolType.RBD) {
- s_logger.debug("Skipping RBD pool " + pool.getName() + " as a suitable pool. RBD is not supported for System VM's");
- continue;
- }
-
- if (checkPool(avoid, pool, dskCh, template, null, sc, plan)) {
- StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId());
- suitablePools.add(pol);
- }
- }
-
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("FirstFitStoragePoolAllocator returning "+suitablePools.size() +" suitable storage pools");
- }
-
- return suitablePools;
- }
-
- private List<StoragePoolVO> reorderPoolsByNumberOfVolumes(DeploymentPlan plan, List<StoragePoolVO> pools, Account account) {
- if(account == null){
- return pools;
- }
- long dcId = plan.getDataCenterId();
- Long podId = plan.getPodId();
- Long clusterId = plan.getClusterId();
-
- List<Long> poolIdsByVolCount = _volumeDao.listPoolIdsByVolumeCount(dcId, podId, clusterId, account.getAccountId());
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("List of pools in ascending order of number of volumes for account id: "+ account.getAccountId() + " is: "+ poolIdsByVolCount);
- }
-
- //now filter the given list of Pools by this ordered list
- Map<Long, StoragePoolVO> poolMap = new HashMap<Long, StoragePoolVO>();
- for (StoragePoolVO pool : pools) {
- poolMap.put(pool.getId(), pool);
- }
- List<Long> matchingPoolIds = new ArrayList<Long>(poolMap.keySet());
-
- poolIdsByVolCount.retainAll(matchingPoolIds);
-
- List<StoragePoolVO> reorderedPools = new ArrayList<StoragePoolVO>();
- for(Long id: poolIdsByVolCount){
- reorderedPools.add(poolMap.get(id));
- }
-
- return reorderedPools;
- }
-
- @Override
- public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
- super.configure(name, params);
-
- if (_configDao != null) {
- Map<String, String> configs = _configDao.getConfiguration(params);
- String allocationAlgorithm = configs.get("vm.allocation.algorithm");
- if (allocationAlgorithm != null) {
- _allocationAlgorithm = allocationAlgorithm;
- }
- }
- return true;
- }
-}
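For readers following the allocator code removed above, here is a small, self-contained sketch of the userdispersing reordering step: pool ids already sorted by the account's volume count are intersected with the candidate set, and the candidates are then returned in that order. Class, method, and sample values below are invented for illustration and are not part of the patch.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class PoolReorderSketch {

    /**
     * Mirror of the userdispersing branch removed above: pool ids already
     * ordered by the account's volume count are intersected with the
     * candidate pools (retainAll), and that order is what gets returned.
     */
    static List<Long> reorder(List<Long> candidatePoolIds, List<Long> poolIdsByVolCount) {
        Set<Long> candidates = new HashSet<Long>(candidatePoolIds);
        List<Long> ordered = new ArrayList<Long>(poolIdsByVolCount);
        ordered.retainAll(candidates); // drop ids that are not actual candidates
        return ordered;
    }

    public static void main(String[] args) {
        // pools 7, 3 and 9 are candidates; the account has fewest volumes on 3, then 9, then 7
        List<Long> candidates = Arrays.asList(7L, 3L, 9L);
        List<Long> byVolumeCount = Arrays.asList(3L, 9L, 7L, 5L); // pool 5 is not a candidate
        System.out.println(reorder(candidates, byVolumeCount));   // prints [3, 9, 7]
    }
}
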
diff --git a/server/src/com/cloud/storage/allocator/LocalStoragePoolAllocator.java b/server/src/com/cloud/storage/allocator/LocalStoragePoolAllocator.java
deleted file mode 100644
index 24b4dabe281..00000000000
--- a/server/src/com/cloud/storage/allocator/LocalStoragePoolAllocator.java
+++ /dev/null
@@ -1,288 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-package com.cloud.storage.allocator;
-
-import java.math.BigDecimal;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-import javax.ejb.Local;
-import javax.inject.Inject;
-import javax.naming.ConfigurationException;
-
-import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
-import org.apache.log4j.Logger;
-
-import com.cloud.capacity.CapacityVO;
-import com.cloud.capacity.dao.CapacityDao;
-import com.cloud.configuration.dao.ConfigurationDao;
-import com.cloud.deploy.DeploymentPlan;
-import com.cloud.deploy.DeploymentPlanner.ExcludeList;
-import com.cloud.offering.ServiceOffering;
-import com.cloud.service.dao.ServiceOfferingDao;
-import com.cloud.storage.StoragePool;
-import com.cloud.storage.StoragePoolHostVO;
-import com.cloud.storage.Volume;
-import com.cloud.storage.VolumeVO;
-import com.cloud.storage.dao.StoragePoolHostDao;
-import com.cloud.utils.DateUtil;
-import com.cloud.utils.NumbersUtil;
-import com.cloud.utils.db.GenericSearchBuilder;
-import com.cloud.utils.db.JoinBuilder;
-import com.cloud.utils.db.SearchBuilder;
-import com.cloud.utils.db.SearchCriteria;
-import com.cloud.utils.db.SearchCriteria.Func;
-import com.cloud.vm.DiskProfile;
-import com.cloud.vm.UserVmVO;
-import com.cloud.vm.VMInstanceVO;
-import com.cloud.vm.VirtualMachine;
-import com.cloud.vm.VirtualMachine.State;
-import com.cloud.vm.VirtualMachineProfile;
-import com.cloud.vm.dao.UserVmDao;
-import com.cloud.vm.dao.VMInstanceDao;
-
-//
-// TODO
-// Rush to make LocalStoragePoolAllocator use static allocation status, we should revisit the overall
-// allocation process to make it more reliable in next release. The code put in here is pretty ugly
-//
-@Local(value = StoragePoolAllocator.class)
-public class LocalStoragePoolAllocator extends FirstFitStoragePoolAllocator {
- private static final Logger s_logger = Logger.getLogger(LocalStoragePoolAllocator.class);
-
- @Inject
- StoragePoolHostDao _poolHostDao;
- @Inject
- VMInstanceDao _vmInstanceDao;
- @Inject
- UserVmDao _vmDao;
- @Inject
- ServiceOfferingDao _offeringDao;
- @Inject
- CapacityDao _capacityDao;
- @Inject
- ConfigurationDao _configDao;
-
- protected GenericSearchBuilder<VMInstanceVO, Long> VmsOnPoolSearch;
-
- private int _secondsToSkipStoppedVMs = 86400;
-
- @Override
- public boolean allocatorIsCorrectType(DiskProfile dskCh) {
- return localStorageAllocationNeeded(dskCh);
- }
-
- @Override
- public List<StoragePool> allocateToPool(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
-
- List<StoragePool> suitablePools = new ArrayList<StoragePool>();
-
- // Check that the allocator type is correct
- if (!allocatorIsCorrectType(dskCh)) {
- return suitablePools;
- }
-
- ExcludeList myAvoids = new ExcludeList(avoid.getDataCentersToAvoid(), avoid.getPodsToAvoid(), avoid.getClustersToAvoid(), avoid.getHostsToAvoid(), avoid.getPoolsToAvoid());
-
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("LocalStoragePoolAllocator trying to find storage pool to fit the vm");
- }
-
- // data disk and host identified from deploying vm (attach volume case)
- if (dskCh.getType() == Volume.Type.DATADISK && plan.getHostId() != null) {
- List<StoragePoolHostVO> hostPools = _poolHostDao.listByHostId(plan.getHostId());
- for (StoragePoolHostVO hostPool: hostPools) {
- StoragePoolVO pool = _storagePoolDao.findById(hostPool.getPoolId());
- if (pool != null && pool.isLocal()) {
- s_logger.debug("Found suitable local storage pool " + pool.getId() + ", adding to list");
- StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId());
- suitablePools.add(pol);
- }
-
- if (suitablePools.size() == returnUpTo) {
- break;
- }
- }
- } else {
- List<StoragePool> availablePool;
- while (!(availablePool = super.allocateToPool(dskCh, vmProfile, plan, myAvoids, 1)).isEmpty()) {
- StoragePool pool = availablePool.get(0);
- myAvoids.addPool(pool.getId());
- List<StoragePoolHostVO> hostsInSPool = _poolHostDao.listByPoolId(pool.getId());
- assert (hostsInSPool.size() == 1) : "Local storage pool should be one host per pool";
-
- s_logger.debug("Found suitable local storage pool " + pool.getId() + ", adding to list");
- suitablePools.add(pool);
-
- if (suitablePools.size() == returnUpTo) {
- break;
- }
- }
- }
-
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("LocalStoragePoolAllocator returning " + suitablePools.size() + " suitable storage pools");
- }
-
- if (suitablePools.isEmpty()) {
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("Unable to find storage pool to fit the vm");
- }
- }
- return suitablePools;
- }
-
- // we don't need to check host capacity now, since hostAllocators will do that anyway
- private boolean hostHasCpuMemoryCapacity(long hostId, List<Long> vmOnHost, VMInstanceVO vm) {
-
- ServiceOffering so = _offeringDao.findById(vm.getServiceOfferingId());
-
- long usedMemory = calcHostAllocatedCpuMemoryCapacity(vmOnHost, CapacityVO.CAPACITY_TYPE_MEMORY);
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("Calculated static-allocated memory for VMs on host " + hostId + ": " + usedMemory + " bytes, requesting memory: " + (so != null ? so.getRamSize() * 1024L * 1024L : "")
- + " bytes");
- }
-
- SearchCriteria<CapacityVO> sc = _capacityDao.createSearchCriteria();
- sc.addAnd("hostOrPoolId", SearchCriteria.Op.EQ, hostId);
- sc.addAnd("capacityType", SearchCriteria.Op.EQ, CapacityVO.CAPACITY_TYPE_MEMORY);
- List<CapacityVO> capacities = _capacityDao.search(sc, null);
- if (capacities.size() > 0) {
- if (capacities.get(0).getTotalCapacity() < usedMemory + (so != null ? so.getRamSize() * 1024L * 1024L : 0)) {
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("Host " + hostId + " runs out of memory capacity");
- }
- return false;
- }
- } else {
- s_logger.warn("Host " + hostId + " has not reported memory capacity yet");
- return false;
- }
-
- long usedCpu = calcHostAllocatedCpuMemoryCapacity(vmOnHost, CapacityVO.CAPACITY_TYPE_CPU);
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("Calculated static-allocated CPU for VMs on host " + hostId + ": " + usedCpu + " GHz, requesting cpu: " + (so != null ? so.getCpu() * so.getSpeed() : "") + " GHz");
- }
-
- sc = _capacityDao.createSearchCriteria();
- sc.addAnd("hostOrPoolId", SearchCriteria.Op.EQ, hostId);
- sc.addAnd("capacityType", SearchCriteria.Op.EQ, CapacityVO.CAPACITY_TYPE_CPU);
- capacities = _capacityDao.search(sc, null);
- if (capacities.size() > 0) {
- if (capacities.get(0).getTotalCapacity() < usedCpu + (so != null ? so.getCpu() * so.getSpeed() : 0)) {
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("Host " + hostId + " runs out of CPU capacity");
- }
- return false;
- }
- } else {
- s_logger.warn("Host " + hostId + " has not reported CPU capacity yet");
- return false;
- }
-
- return true;
- }
-
- private boolean skipCalculation(VMInstanceVO vm) {
- if (vm == null) {
- return true;
- }
-
- if (vm.getState() == State.Expunging) {
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("Skip counting capacity for Expunging VM : " + vm.getInstanceName());
- }
- return true;
- }
-
- if (vm.getState() == State.Destroyed && vm.getType() != VirtualMachine.Type.User) {
- return true;
- }
-
- if (vm.getState() == State.Stopped || vm.getState() == State.Destroyed) {
- // for stopped/Destroyed VMs, we will skip counting it if it hasn't been used for a while
-
- long millisecondsSinceLastUpdate = DateUtil.currentGMTTime().getTime() - vm.getUpdateTime().getTime();
- if (millisecondsSinceLastUpdate > _secondsToSkipStoppedVMs * 1000L) {
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("Skip counting vm " + vm.getInstanceName() + " in capacity allocation as it has been stopped for " + millisecondsSinceLastUpdate / 60000 + " minutes");
- }
- return true;
- }
- }
- return false;
- }
-
- private long calcHostAllocatedCpuMemoryCapacity(List<Long> vmOnHost, short capacityType) {
- assert (capacityType == CapacityVO.CAPACITY_TYPE_MEMORY || capacityType == CapacityVO.CAPACITY_TYPE_CPU) : "Invalid capacity type passed in calcHostAllocatedCpuCapacity()";
-
- long usedCapacity = 0;
- for (Long vmId : vmOnHost) {
- VMInstanceVO vm = _vmInstanceDao.findById(vmId);
- if (skipCalculation(vm)) {
- continue;
- }
-
- ServiceOffering so = _offeringDao.findById(vm.getServiceOfferingId());
- if (vm.getType() == VirtualMachine.Type.User) {
- UserVmVO userVm = _vmDao.findById(vm.getId());
- if (userVm == null) {
- continue;
- }
- }
-
- if (capacityType == CapacityVO.CAPACITY_TYPE_MEMORY) {
- usedCapacity += so.getRamSize() * 1024L * 1024L;
- } else if (capacityType == CapacityVO.CAPACITY_TYPE_CPU) {
- usedCapacity += so.getCpu() * so.getSpeed();
- }
- }
-
- return usedCapacity;
- }
-
- @Override
- public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
- super.configure(name, params);
-
- _storageOverprovisioningFactor = new BigDecimal(1);
- _extraBytesPerVolume = NumbersUtil.parseLong((String) params.get("extra.bytes.per.volume"), 50 * 1024L * 1024L);
-
- Map<String, String> configs = _configDao.getConfiguration("management-server", params);
- String value = configs.get("vm.resource.release.interval");
- _secondsToSkipStoppedVMs = NumbersUtil.parseInt(value, 86400);
-
- VmsOnPoolSearch = _vmInstanceDao.createSearchBuilder(Long.class);
- VmsOnPoolSearch.select(null, Func.DISTINCT, VmsOnPoolSearch.entity().getId());
- VmsOnPoolSearch.and("removed", VmsOnPoolSearch.entity().getRemoved(), SearchCriteria.Op.NULL);
- VmsOnPoolSearch.and("state", VmsOnPoolSearch.entity().getState(), SearchCriteria.Op.NIN);
-
- SearchBuilder<VolumeVO> sbVolume = _volumeDao.createSearchBuilder();
- sbVolume.and("poolId", sbVolume.entity().getPoolId(), SearchCriteria.Op.EQ);
-
- VmsOnPoolSearch.join("volumeJoin", sbVolume, VmsOnPoolSearch.entity().getId(), sbVolume.entity().getInstanceId(), JoinBuilder.JoinType.INNER);
-
- sbVolume.done();
- VmsOnPoolSearch.done();
-
- return true;
- }
-
- public LocalStoragePoolAllocator() {
- }
-}
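The deleted LocalStoragePoolAllocator skipped stopped or destroyed VMs in its capacity math once they had been idle longer than vm.resource.release.interval (falling back to 86400 seconds). Below is a minimal sketch of that check using plain millisecond timestamps instead of DateUtil and VMInstanceVO; class and method names are illustrative only.

public class StoppedVmSkipSketch {
    // default mirrors the _secondsToSkipStoppedVMs fallback in the deleted allocator
    static final int SECONDS_TO_SKIP_STOPPED_VMS = 86400;

    /** true if a stopped VM's capacity should no longer be counted against the host */
    static boolean skipStoppedVm(long nowMillis, long lastUpdateMillis) {
        long idleMillis = nowMillis - lastUpdateMillis;
        return idleMillis > SECONDS_TO_SKIP_STOPPED_VMS * 1000L;
    }

    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        long stoppedTwoDaysAgo = now - 2 * 24 * 3600 * 1000L;
        long stoppedOneHourAgo = now - 3600 * 1000L;
        System.out.println(skipStoppedVm(now, stoppedTwoDaysAgo)); // true: no longer counted
        System.out.println(skipStoppedVm(now, stoppedOneHourAgo)); // false: still counted
    }
}
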
diff --git a/server/src/com/cloud/storage/dao/StoragePoolDao.java b/server/src/com/cloud/storage/dao/StoragePoolDao.java
index 64bbd5fb5ed..28ead9c2945 100644
--- a/server/src/com/cloud/storage/dao/StoragePoolDao.java
+++ b/server/src/com/cloud/storage/dao/StoragePoolDao.java
@@ -20,6 +20,7 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Map;
+import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import com.cloud.storage.StoragePoolStatus;
@@ -37,7 +38,7 @@ public interface StoragePoolDao extends GenericDao {
/**
* @param datacenterId -- the id of the datacenter (availability zone)
*/
- List listBy(long datacenterId, long podId, Long clusterId);
+ List<StoragePoolVO> listBy(long datacenterId, long podId, Long clusterId, ScopeType scope);
/**
* Set capacity of storage pool in bytes
@@ -71,9 +72,9 @@ public interface StoragePoolDao extends GenericDao {
* @param details details to match. All must match for the pool to be returned.
* @return List of StoragePoolVO
*/
- List findPoolsByDetails(long dcId, long podId, Long clusterId, Map details);
+ List<StoragePoolVO> findPoolsByDetails(long dcId, long podId, Long clusterId, Map<String, String> details, ScopeType scope);
- List findPoolsByTags(long dcId, long podId, Long clusterId, String[] tags, Boolean shared);
+ List<StoragePoolVO> findPoolsByTags(long dcId, long podId, Long clusterId, String[] tags);
/**
* Find pool by UUID.
@@ -104,4 +105,9 @@ public interface StoragePoolDao extends GenericDao {
List<StoragePoolVO> listByStatusInZone(long dcId, StoragePoolStatus status);
List<StoragePoolVO> listPoolsByCluster(long clusterId);
+
+ List<StoragePoolVO> findLocalStoragePoolsByTags(long dcId, long podId,
+ Long clusterId, String[] tags);
+
+ List<StoragePoolVO> findZoneWideStoragePoolsByTags(long dcId, String[] tags);
}
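A hedged sketch of how a consumer of this interface might combine the scope-specific lookups added above: host-scoped (local) pools for local-storage offerings, and cluster-scoped plus zone-wide pools otherwise. Whether an allocator should merge scopes this way is an assumption made for illustration; only the DAO method signatures are taken from the patch, and the surrounding class is invented.

import java.util.ArrayList;
import java.util.List;

import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;

import com.cloud.storage.dao.StoragePoolDao;

public class ScopedPoolLookupSketch {
    private final StoragePoolDao storagePoolDao;

    public ScopedPoolLookupSketch(StoragePoolDao storagePoolDao) {
        this.storagePoolDao = storagePoolDao;
    }

    /**
     * Collect candidate pools for a placement request, using the new
     * scope-aware DAO methods. Merging cluster-scoped and zone-wide pools
     * into one list is illustrative, not prescribed by the patch.
     */
    public List<StoragePoolVO> candidates(long dcId, long podId, Long clusterId,
                                          String[] tags, boolean useLocalStorage) {
        List<StoragePoolVO> pools = new ArrayList<StoragePoolVO>();
        if (useLocalStorage) {
            // host-scoped (local) pools
            pools.addAll(storagePoolDao.findLocalStoragePoolsByTags(dcId, podId, clusterId, tags));
        } else {
            // cluster-scoped shared pools
            pools.addAll(storagePoolDao.findPoolsByTags(dcId, podId, clusterId, tags));
            // zone-wide pools are visible from any cluster in the zone
            pools.addAll(storagePoolDao.findZoneWideStoragePoolsByTags(dcId, tags));
        }
        return pools;
    }
}
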
diff --git a/server/src/com/cloud/storage/dao/StoragePoolDaoImpl.java b/server/src/com/cloud/storage/dao/StoragePoolDaoImpl.java
index ebf2943ec9c..28b4dbc5c18 100644
--- a/server/src/com/cloud/storage/dao/StoragePoolDaoImpl.java
+++ b/server/src/com/cloud/storage/dao/StoragePoolDaoImpl.java
@@ -28,14 +28,13 @@ import javax.ejb.Local;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
+import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.springframework.stereotype.Component;
import com.cloud.host.Status;
-import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.StoragePoolDetailVO;
import com.cloud.storage.StoragePoolStatus;
-
import com.cloud.utils.db.DB;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.GenericSearchBuilder;
@@ -43,6 +42,8 @@ import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Func;
import com.cloud.utils.db.SearchCriteria.Op;
+import com.cloud.utils.db.SearchCriteria2;
+import com.cloud.utils.db.SearchCriteriaService;
import com.cloud.utils.db.Transaction;
import com.cloud.utils.exception.CloudRuntimeException;
@@ -59,8 +60,11 @@ public class StoragePoolDaoImpl extends GenericDaoBase imp
@Inject protected StoragePoolDetailsDao _detailsDao;
- private final String DetailsSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_details ON storage_pool.id = storage_pool_details.pool_id WHERE storage_pool.removed is null and storage_pool.data_center_id = ? and (storage_pool.pod_id = ? or storage_pool.pod_id is null) and (";
+ private final String DetailsSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_details ON storage_pool.id = storage_pool_details.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.data_center_id = ? and (storage_pool.pod_id = ? or storage_pool.pod_id is null) and storage_pool.scope = ? and (";
private final String DetailsSqlSuffix = ") GROUP BY storage_pool_details.pool_id HAVING COUNT(storage_pool_details.name) >= ?";
+ private final String ZoneWideDetailsSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_details ON storage_pool.id = storage_pool_details.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.data_center_id = ? and storage_pool.scope = ? and (";
+ private final String ZoneWideDetailsSqlSuffix = ") GROUP BY storage_pool_details.pool_id HAVING COUNT(storage_pool_details.name) >= ?";
+
private final String FindPoolTagDetails = "SELECT storage_pool_details.name FROM storage_pool_details WHERE pool_id = ? and value = ?";
protected StoragePoolDaoImpl() {
@@ -77,6 +81,8 @@ public class StoragePoolDaoImpl extends GenericDaoBase imp
DcPodSearch = createSearchBuilder();
DcPodSearch.and("datacenterId", DcPodSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ);
+ DcPodSearch.and("status", DcPodSearch.entity().getStatus(), SearchCriteria.Op.EQ);
+ DcPodSearch.and("scope", DcPodSearch.entity().getScope(), SearchCriteria.Op.EQ);
DcPodSearch.and().op("nullpod", DcPodSearch.entity().getPodId(), SearchCriteria.Op.NULL);
DcPodSearch.or("podId", DcPodSearch.entity().getPodId(), SearchCriteria.Op.EQ);
DcPodSearch.cp();
@@ -87,6 +93,8 @@ public class StoragePoolDaoImpl extends GenericDaoBase imp
DcPodAnyClusterSearch = createSearchBuilder();
DcPodAnyClusterSearch.and("datacenterId", DcPodAnyClusterSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ);
+ DcPodAnyClusterSearch.and("status", DcPodAnyClusterSearch.entity().getStatus(), SearchCriteria.Op.EQ);
+ DcPodAnyClusterSearch.and("scope", DcPodAnyClusterSearch.entity().getScope(), SearchCriteria.Op.EQ);
DcPodAnyClusterSearch.and().op("nullpod", DcPodAnyClusterSearch.entity().getPodId(), SearchCriteria.Op.NULL);
DcPodAnyClusterSearch.or("podId", DcPodAnyClusterSearch.entity().getPodId(), SearchCriteria.Op.EQ);
DcPodAnyClusterSearch.cp();
@@ -192,11 +200,13 @@ public class StoragePoolDaoImpl extends GenericDaoBase imp
}
@Override
- public List listBy(long datacenterId, long podId, Long clusterId) {
+ public List<StoragePoolVO> listBy(long datacenterId, long podId, Long clusterId, ScopeType scope) {
if (clusterId != null) {
SearchCriteria<StoragePoolVO> sc = DcPodSearch.create();
sc.setParameters("datacenterId", datacenterId);
sc.setParameters("podId", podId);
+ sc.setParameters("status", Status.Up);
+ sc.setParameters("scope", scope);
sc.setParameters("cluster", clusterId);
return listBy(sc);
@@ -204,6 +214,8 @@ public class StoragePoolDaoImpl extends GenericDaoBase imp
SearchCriteria<StoragePoolVO> sc = DcPodAnyClusterSearch.create();
sc.setParameters("datacenterId", datacenterId);
sc.setParameters("podId", podId);
+ sc.setParameters("status", Status.Up);
+ sc.setParameters("scope", scope);
return listBy(sc);
}
}
@@ -242,11 +254,12 @@ public class StoragePoolDaoImpl extends GenericDaoBase imp
@DB
@Override
- public List findPoolsByDetails(long dcId, long podId, Long clusterId, Map details) {
+ public List<StoragePoolVO> findPoolsByDetails(long dcId, long podId, Long clusterId, Map<String, String> details, ScopeType scope) {
StringBuilder sql = new StringBuilder(DetailsSqlPrefix);
if (clusterId != null) {
sql.append("storage_pool.cluster_id = ? OR storage_pool.cluster_id IS NULL) AND (");
}
+
for (Map.Entry<String, String> detail : details.entrySet()) {
sql.append("((storage_pool_details.name='").append(detail.getKey()).append("') AND (storage_pool_details.value='").append(detail.getValue()).append("')) OR ");
}
@@ -259,6 +272,7 @@ public class StoragePoolDaoImpl extends GenericDaoBase imp
int i = 1;
pstmt.setLong(i++, dcId);
pstmt.setLong(i++, podId);
+ pstmt.setString(i++, scope.toString());
if (clusterId != null) {
pstmt.setLong(i++, clusterId);
}
@@ -283,26 +297,67 @@ public class StoragePoolDaoImpl extends GenericDaoBase imp
}
@Override
- public List findPoolsByTags(long dcId, long podId, Long clusterId, String[] tags, Boolean shared) {
+ public List<StoragePoolVO> findPoolsByTags(long dcId, long podId, Long clusterId, String[] tags) {
List<StoragePoolVO> storagePools = null;
if (tags == null || tags.length == 0) {
- storagePools = listBy(dcId, podId, clusterId);
+ storagePools = listBy(dcId, podId, clusterId, ScopeType.CLUSTER);
} else {
Map<String, String> details = tagsToDetails(tags);
- storagePools = findPoolsByDetails(dcId, podId, clusterId, details);
+ storagePools = findPoolsByDetails(dcId, podId, clusterId, details, ScopeType.CLUSTER);
}
-
- if (shared == null) {
- return storagePools;
+
+ return storagePools;
+ }
+
+ @Override
+ public List<StoragePoolVO> findLocalStoragePoolsByTags(long dcId, long podId, Long clusterId, String[] tags) {
+ List<StoragePoolVO> storagePools = null;
+ if (tags == null || tags.length == 0) {
+ storagePools = listBy(dcId, podId, clusterId, ScopeType.HOST);
} else {
- List<StoragePoolVO> filteredStoragePools = new ArrayList<StoragePoolVO>(storagePools);
- for (StoragePoolVO pool : storagePools) {
- if (shared != pool.isShared()) {
- filteredStoragePools.remove(pool);
- }
- }
-
- return filteredStoragePools;
+ Map<String, String> details = tagsToDetails(tags);
+ storagePools = findPoolsByDetails(dcId, podId, clusterId, details, ScopeType.HOST);
+ }
+
+ return storagePools;
+ }
+
+ @Override
+ public List<StoragePoolVO> findZoneWideStoragePoolsByTags(long dcId, String[] tags) {
+ List<StoragePoolVO> storagePools = null;
+ if (tags == null || tags.length == 0) {
+ SearchCriteriaService<StoragePoolVO, StoragePoolVO> sc = SearchCriteria2.create(StoragePoolVO.class);
+ sc.addAnd(sc.getEntity().getDataCenterId(), Op.EQ, dcId);
+ sc.addAnd(sc.getEntity().getStatus(), Op.EQ, Status.Up);
+ sc.addAnd(sc.getEntity().getScope(), Op.EQ, ScopeType.ZONE);
+ return sc.list();
+ } else {
+ Map<String, String> details = tagsToDetails(tags);
+
+ StringBuilder sql = new StringBuilder(ZoneWideDetailsSqlPrefix);
+
+ for (Map.Entry<String, String> detail : details.entrySet()) {
+ sql.append("((storage_pool_details.name='").append(detail.getKey()).append("') AND (storage_pool_details.value='").append(detail.getValue()).append("')) OR ");
+ }
+ sql.delete(sql.length() - 4, sql.length());
+ sql.append(ZoneWideDetailsSqlSuffix);
+ Transaction txn = Transaction.currentTxn();
+ PreparedStatement pstmt = null;
+ try {
+ pstmt = txn.prepareAutoCloseStatement(sql.toString());
+ int i = 1;
+ pstmt.setLong(i++, dcId);
+ pstmt.setString(i++, ScopeType.ZONE.toString());
+ pstmt.setInt(i++, details.size());
+ ResultSet rs = pstmt.executeQuery();
+ List<StoragePoolVO> pools = new ArrayList<StoragePoolVO>();
+ while (rs.next()) {
+ pools.add(toEntityBean(rs, false));
+ }
+ return pools;
+ } catch (SQLException e) {
+ throw new CloudRuntimeException("Unable to execute " + pstmt, e);
+ }
}
}
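To make the string handling in findZoneWideStoragePoolsByTags easier to follow, this standalone sketch rebuilds the same SQL text from a pair of tag details; the trailing " OR " is trimmed by the delete(length - 4, length) call exactly as in the patch. The SQL constants are copied from above, while the driver code, class name, and sample details are illustrative.

import java.util.LinkedHashMap;
import java.util.Map;

public class ZoneWideSqlSketch {
    static final String PREFIX =
        "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_details ON storage_pool.id = storage_pool_details.pool_id "
        + "WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.data_center_id = ? and storage_pool.scope = ? and (";
    static final String SUFFIX =
        ") GROUP BY storage_pool_details.pool_id HAVING COUNT(storage_pool_details.name) >= ?";

    static String buildSql(Map<String, String> details) {
        StringBuilder sql = new StringBuilder(PREFIX);
        for (Map.Entry<String, String> detail : details.entrySet()) {
            sql.append("((storage_pool_details.name='").append(detail.getKey())
               .append("') AND (storage_pool_details.value='").append(detail.getValue())
               .append("')) OR ");
        }
        sql.delete(sql.length() - 4, sql.length()); // drop the trailing " OR "
        sql.append(SUFFIX);
        return sql.toString();
    }

    public static void main(String[] args) {
        Map<String, String> details = new LinkedHashMap<String, String>();
        details.put("ssd", "true");
        details.put("tier", "gold");
        // the three placeholders are then bound to dcId, ScopeType.ZONE and details.size()
        System.out.println(buildSql(details));
    }
}
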
diff --git a/server/src/com/cloud/storage/listener/SnapshotStateListener.java b/server/src/com/cloud/storage/listener/SnapshotStateListener.java
index 8f94f23a27c..21fcf11930b 100644
--- a/server/src/com/cloud/storage/listener/SnapshotStateListener.java
+++ b/server/src/com/cloud/storage/listener/SnapshotStateListener.java
@@ -17,27 +17,29 @@
package com.cloud.storage.listener;
-import java.util.HashMap;
-import java.util.Map;
-
import javax.inject.Inject;
import org.apache.cloudstack.framework.events.EventBus;
import org.apache.cloudstack.framework.events.EventBusException;
import org.apache.log4j.Logger;
+import org.springframework.beans.factory.NoSuchBeanDefinitionException;
import com.cloud.event.EventCategory;
import com.cloud.server.ManagementServer;
import com.cloud.storage.Snapshot;
+import com.cloud.storage.Snapshot.State;
import com.cloud.storage.Snapshot.Event;
import com.cloud.storage.Snapshot.State;
import com.cloud.storage.SnapshotVO;
import com.cloud.utils.fsm.StateListener;
+import com.cloud.utils.component.ComponentContext;
+
+import java.util.HashMap;
+import java.util.Map;
public class SnapshotStateListener implements StateListener<State, Event, SnapshotVO> {
- // get the event bus provider if configured
- @Inject protected EventBus _eventBus;
+ protected static EventBus _eventBus = null;
private static final Logger s_logger = Logger.getLogger(VolumeStateListener.class);
@@ -59,8 +61,10 @@ public class SnapshotStateListener implements StateListener {
- // get the event bus provider if configured
- @Inject protected EventBus _eventBus = null;
+ protected static EventBus _eventBus = null;
private static final Logger s_logger = Logger.getLogger(VolumeStateListener.class);
@@ -57,8 +56,10 @@ public class VolumeStateListener implements StateListener
private void pubishOnEventBus(String event, String status, Volume vo, State oldState, State newState) {
- if (_eventBus == null) {
- return; // no provider is configured to provide events bus, so just return
+ try {
+ _eventBus = ComponentContext.getComponent(EventBus.class);
+ } catch(NoSuchBeanDefinitionException nbe) {
+ return; // no provider is configured to provide events bus, so just return
}
String resourceName = getEntityFromClassName(Volume.class.getName());
diff --git a/server/src/com/cloud/upgrade/dao/Upgrade410to420.java b/server/src/com/cloud/upgrade/dao/Upgrade410to420.java
index 9000e15f7aa..db562b1c17a 100644
--- a/server/src/com/cloud/upgrade/dao/Upgrade410to420.java
+++ b/server/src/com/cloud/upgrade/dao/Upgrade410to420.java
@@ -59,10 +59,11 @@ public class Upgrade410to420 implements DbUpgrade {
@Override
public void performDataMigration(Connection conn) {
- PreparedStatement sql = null;
- try {
- sql = conn.prepareStatement("update vm_template set image_data_store_id = 1 where type = 'SYSTEM' or type = 'BUILTIN'");
- sql.executeUpdate();
+ upgradeVmwareLabels(conn);
+ PreparedStatement sql = null;
+ try {
+ sql = conn.prepareStatement("update vm_template set image_data_store_id = 1 where type = 'SYSTEM' or type = 'BUILTIN'");
+ sql.executeUpdate();
} catch (SQLException e) {
throw new CloudRuntimeException("Failed to upgrade vm template data store uuid: " + e.toString());
} finally {
@@ -73,7 +74,7 @@ public class Upgrade410to420 implements DbUpgrade {
}
}
}
- }
+ }
@Override
public File[] getCleanupScripts() {
@@ -83,5 +84,78 @@ public class Upgrade410to420 implements DbUpgrade {
}
return new File[] { new File(script) };
- }
- }
\ No newline at end of file
+ }
+
+ private String getNewLabel(ResultSet rs, String oldParamValue) {
+ int separatorIndex;
+ String oldGuestLabel;
+ String newGuestLabel = oldParamValue;
+ try {
+ // No need to iterate because the global param setting applies to all physical networks irrespective of traffic type
+ if (rs.next()) {
+ oldGuestLabel = rs.getString("vmware_network_label");
+ // guestLabel is in format [[],VLANID]
+ separatorIndex = oldGuestLabel.indexOf(",");
+ if(separatorIndex > -1) {
+ newGuestLabel += oldGuestLabel.substring(separatorIndex);
+ }
+ }
+ } catch (SQLException e) {
+ s_logger.error(new CloudRuntimeException("Failed to read vmware_network_label : " + e));
+ } finally {
+ try {
+ if (rs != null) {
+ rs.close();
+ }
+ } catch (SQLException e) {
+ }
+ }
+ return newGuestLabel;
+ }
+
+ private void upgradeVmwareLabels(Connection conn) {
+ PreparedStatement pstmt = null;
+ ResultSet rsParams = null;
+ ResultSet rsLabel = null;
+ String newLabel;
+ String trafficType = null;
+ String trafficTypeVswitchParam;
+ String trafficTypeVswitchParamValue;
+
+ try {
+ // update the existing vmware traffic labels
+ pstmt = conn.prepareStatement("select name,value from `cloud`.`configuration` where category='Hidden' and value is not NULL and name REGEXP 'vmware\\.*\\.vswitch';");
+ rsParams = pstmt.executeQuery();
+ while (rsParams.next()) {
+ trafficTypeVswitchParam = rsParams.getString("name");
+ trafficTypeVswitchParamValue = rsParams.getString("value");
+ // When upgraded from 4.0 to 4.1 update physical network traffic label with trafficTypeVswitchParam
+ if (trafficTypeVswitchParam.equals("vmware.private.vswitch")) {
+ trafficType = "Management"; //TODO(sateesh): Ignore storage traffic, as required physical network already implemented, anything else tobe done?
+ } else if (trafficTypeVswitchParam.equals("vmware.public.vswitch")) {
+ trafficType = "Public";
+ } else if (trafficTypeVswitchParam.equals("vmware.guest.vswitch")) {
+ trafficType = "Guest";
+ }
+ s_logger.debug("Updating vmware label for " + trafficType + " traffic. Update SQL statement is " + pstmt);
+ pstmt = conn.prepareStatement("select physical_network_id, traffic_type, vmware_network_label from physical_network_traffic_types where vmware_network_label is not NULL and traffic_type='" + trafficType + "';");
+ rsLabel = pstmt.executeQuery();
+ newLabel = getNewLabel(rsLabel, trafficTypeVswitchParamValue);
+ pstmt = conn.prepareStatement("update physical_network_traffic_types set vmware_network_label = " + newLabel + " where traffic_type = '" + trafficType + "' and vmware_network_label is not NULL;");
+ pstmt.executeUpdate();
+ }
+ } catch (SQLException e) {
+ throw new CloudRuntimeException("Unable to set vmware traffic labels ", e);
+ } finally {
+ try {
+ if (rsParams != null) {
+ rsParams.close();
+ }
+ if (pstmt != null) {
+ pstmt.close();
+ }
+ } catch (SQLException e) {
+ }
+ }
+ }
+}
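A small standalone view of what getNewLabel() does during the VMware label upgrade: the vSwitch name from the old global parameter replaces the stored one, while any ",<vlan>" suffix found after the comma in the stored label is preserved. The helper below works on plain strings instead of a ResultSet; the class name and inputs are invented for illustration.

public class VmwareLabelUpgradeSketch {
    /**
     * Combine the vSwitch name from the old global setting with the VLAN
     * suffix (if any) of the previously stored traffic label, as the
     * upgrade path above does.
     */
    static String newLabel(String vswitchFromGlobalParam, String oldStoredLabel) {
        String label = vswitchFromGlobalParam;
        int separatorIndex = oldStoredLabel.indexOf(",");
        if (separatorIndex > -1) {
            label += oldStoredLabel.substring(separatorIndex); // keep ",<vlan>"
        }
        return label;
    }

    public static void main(String[] args) {
        // old label had a VLAN id after the comma: the vSwitch name is replaced, the VLAN kept
        System.out.println(newLabel("vSwitch1", "vSwitch0,200")); // vSwitch1,200
        // no comma in the old label: the global parameter value is used as-is
        System.out.println(newLabel("vSwitch1", "vSwitch0"));     // vSwitch1
    }
}
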
diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java
index c2bba639015..ce53c4579fd 100755
--- a/server/src/com/cloud/vm/UserVmManagerImpl.java
+++ b/server/src/com/cloud/vm/UserVmManagerImpl.java
@@ -2907,7 +2907,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use
}
String reservationId = vmEntity.reserve(plannerName, plan, new ExcludeList(), new Long(callerUser.getId()).toString());
- vmEntity.deploy(reservationId, new Long(callerUser.getId()).toString());
+ vmEntity.deploy(reservationId, new Long(callerUser.getId()).toString(), params);
Pair<UserVmVO, Map<VirtualMachineProfile.Param, Object>> vmParamPair = new Pair(vm, params);
if (vm != null && vm.isUpdateParameters()) {
diff --git a/server/src/com/cloud/vm/UserVmStateListener.java b/server/src/com/cloud/vm/UserVmStateListener.java
index 18f85670948..04aa8180b67 100644
--- a/server/src/com/cloud/vm/UserVmStateListener.java
+++ b/server/src/com/cloud/vm/UserVmStateListener.java
@@ -20,24 +20,24 @@ import com.cloud.event.EventCategory;
import com.cloud.event.EventTypes;
import com.cloud.event.UsageEventUtils;
import com.cloud.event.dao.UsageEventDao;
+import com.cloud.network.Network;
import com.cloud.network.dao.NetworkDao;
import com.cloud.network.dao.NetworkVO;
-import com.cloud.network.Network;
import com.cloud.server.ManagementServer;
+import com.cloud.utils.component.ComponentContext;
import com.cloud.utils.fsm.StateListener;
import com.cloud.vm.VirtualMachine.Event;
import com.cloud.vm.VirtualMachine.State;
import com.cloud.vm.dao.NicDao;
-
+import org.apache.cloudstack.framework.events.EventBus;
import org.apache.log4j.Logger;
+import org.springframework.beans.factory.NoSuchBeanDefinitionException;
-import java.util.Enumeration;
+import javax.inject.Inject;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import javax.inject.Inject;
-
public class UserVmStateListener implements StateListener<State, Event, VirtualMachine> {
@Inject protected UsageEventDao _usageEventDao;
@@ -45,8 +45,7 @@ public class UserVmStateListener implements StateListener listAllUpAndEnabledHostsInOneZoneByHypervisor(
+ HypervisorType type, long dcId) {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
}
diff --git a/setup/bindir/cloud-setup-databases.in b/setup/bindir/cloud-setup-databases.in
index 52a23d6f0fc..1603c23eecb 100755
--- a/setup/bindir/cloud-setup-databases.in
+++ b/setup/bindir/cloud-setup-databases.in
@@ -311,6 +311,7 @@ for example:
self.errorAndExit(err)
self.putDbProperty(key, value)
self.info("Preparing %s"%dbpPath, True)
+ self.putDbProperty("region.id", self.options.regionid)
prepareDBDotProperties()
@@ -576,6 +577,8 @@ for example:
help="Secret key used to encrypt sensitive database values. A string, default is password")
self.parser.add_option("-i", "--mshost", action="store", type="string", dest="mshostip", default="",
help="Cluster management server host IP. A string, by default it will try to detect a local IP")
+ self.parser.add_option("-r", "--regionid", action="store", type="string", dest="regionid", default="1",
+ help="Region Id for the management server cluster")
(self.options, self.args) = self.parser.parse_args()
parseCasualCredit()
diff --git a/tools/apidoc/generateadmincommands.xsl b/tools/apidoc/generateadmincommands.xsl
index 3e9c6c598fa..a33e7baf20d 100644
--- a/tools/apidoc/generateadmincommands.xsl
+++ b/tools/apidoc/generateadmincommands.xsl
@@ -138,8 +138,16 @@ version="1.0">
+