diff --git a/api/src/com/cloud/network/vpn/RemoteAccessVpnService.java b/api/src/com/cloud/network/vpn/RemoteAccessVpnService.java
index d637da638ac..b554719f188 100644
--- a/api/src/com/cloud/network/vpn/RemoteAccessVpnService.java
+++ b/api/src/com/cloud/network/vpn/RemoteAccessVpnService.java
@@ -32,7 +32,7 @@ public interface RemoteAccessVpnService {
RemoteAccessVpn createRemoteAccessVpn(long vpnServerAddressId, String ipRange, boolean openFirewall, long networkId)
throws NetworkRuleConflictException;
- void destroyRemoteAccessVpn(long vpnServerAddressId, Account caller) throws ResourceUnavailableException;
+ void destroyRemoteAccessVpnForIp(long vpnServerAddressId, Account caller) throws ResourceUnavailableException;
RemoteAccessVpn startRemoteAccessVpn(long vpnServerAddressId, boolean openFirewall) throws ResourceUnavailableException;
VpnUser addVpnUser(long vpnOwnerId, String userName, String password);
diff --git a/api/src/org/apache/cloudstack/api/command/user/address/DisassociateIPAddrCmd.java b/api/src/org/apache/cloudstack/api/command/user/address/DisassociateIPAddrCmd.java
index 8f78fe3a959..41691ea86d0 100644
--- a/api/src/org/apache/cloudstack/api/command/user/address/DisassociateIPAddrCmd.java
+++ b/api/src/org/apache/cloudstack/api/command/user/address/DisassociateIPAddrCmd.java
@@ -76,9 +76,9 @@ public class DisassociateIPAddrCmd extends BaseAsyncCmd {
UserContext.current().setEventDetails("Ip Id: " + getIpAddressId());
boolean result = false;
if (!isPortable(id)) {
- _networkService.releaseIpAddress(getIpAddressId());
+ result = _networkService.releaseIpAddress(getIpAddressId());
} else {
- _networkService.releaseIpAddress(getIpAddressId());
+ result = _networkService.releaseIpAddress(getIpAddressId());
}
if (result) {
SuccessResponse response = new SuccessResponse(getCommandName());
diff --git a/api/src/org/apache/cloudstack/api/command/user/vpn/DeleteRemoteAccessVpnCmd.java b/api/src/org/apache/cloudstack/api/command/user/vpn/DeleteRemoteAccessVpnCmd.java
index 5b1c5c6b4e6..06c25305a00 100644
--- a/api/src/org/apache/cloudstack/api/command/user/vpn/DeleteRemoteAccessVpnCmd.java
+++ b/api/src/org/apache/cloudstack/api/command/user/vpn/DeleteRemoteAccessVpnCmd.java
@@ -84,7 +84,7 @@ public class DeleteRemoteAccessVpnCmd extends BaseAsyncCmd {
@Override
public void execute() throws ResourceUnavailableException {
- _ravService.destroyRemoteAccessVpn(publicIpId, UserContext.current().getCaller());
+ _ravService.destroyRemoteAccessVpnForIp(publicIpId, UserContext.current().getCaller());
}
@Override
diff --git a/client/tomcatconf/nonossComponentContext.xml.in b/client/tomcatconf/nonossComponentContext.xml.in
index 1b6ee6eb089..6fa9d38baa4 100644
--- a/client/tomcatconf/nonossComponentContext.xml.in
+++ b/client/tomcatconf/nonossComponentContext.xml.in
@@ -251,7 +251,7 @@
-
+
diff --git a/debian/rules b/debian/rules
index e381b1a8ebe..48485bb9d9b 100755
--- a/debian/rules
+++ b/debian/rules
@@ -34,7 +34,7 @@ build: build-indep
build-indep: build-indep-stamp
build-indep-stamp: configure
- mvn package -Pawsapi -DskipTests -Dsystemvm \
+ mvn clean package -Pawsapi -DskipTests -Dsystemvm \
-Dcs.replace.properties=replace.properties.tmp \
${ACS_BUILD_OPTS}
touch $@
diff --git a/docs/en-US/CloudStack_Nicira_NVP_Guide.xml b/docs/en-US/CloudStack_Nicira_NVP_Guide.xml
index 7f156d5dc09..5431fc1cb43 100644
--- a/docs/en-US/CloudStack_Nicira_NVP_Guide.xml
+++ b/docs/en-US/CloudStack_Nicira_NVP_Guide.xml
@@ -48,6 +48,7 @@
+
diff --git a/docs/en-US/images/nvp-add-controller.png b/docs/en-US/images/nvp-add-controller.png
new file mode 100644
index 00000000000..e02d31f0a37
Binary files /dev/null and b/docs/en-US/images/nvp-add-controller.png differ
diff --git a/docs/en-US/images/nvp-enable-provider.png b/docs/en-US/images/nvp-enable-provider.png
new file mode 100644
index 00000000000..0f2d02ddfa9
Binary files /dev/null and b/docs/en-US/images/nvp-enable-provider.png differ
diff --git a/docs/en-US/images/nvp-network-offering.png b/docs/en-US/images/nvp-network-offering.png
new file mode 100644
index 00000000000..c2d25c48c19
Binary files /dev/null and b/docs/en-US/images/nvp-network-offering.png differ
diff --git a/docs/en-US/images/nvp-physical-network-stt.png b/docs/en-US/images/nvp-physical-network-stt.png
new file mode 100644
index 00000000000..2ce7853ac54
Binary files /dev/null and b/docs/en-US/images/nvp-physical-network-stt.png differ
diff --git a/docs/en-US/images/nvp-vpc-offering-edit.png b/docs/en-US/images/nvp-vpc-offering-edit.png
new file mode 100644
index 00000000000..ff235e24cd6
Binary files /dev/null and b/docs/en-US/images/nvp-vpc-offering-edit.png differ
diff --git a/docs/en-US/plugin-niciranvp-about.xml b/docs/en-US/plugin-niciranvp-about.xml
index 8d2e20e7756..cfab83c73c3 100644
--- a/docs/en-US/plugin-niciranvp-about.xml
+++ b/docs/en-US/plugin-niciranvp-about.xml
@@ -1,5 +1,5 @@
-
%BOOK_ENTITIES;
diff --git a/docs/en-US/plugin-niciranvp-devicemanagement.xml b/docs/en-US/plugin-niciranvp-devicemanagement.xml
index 57b8eee9d7d..761c39f3179 100644
--- a/docs/en-US/plugin-niciranvp-devicemanagement.xml
+++ b/docs/en-US/plugin-niciranvp-devicemanagement.xml
@@ -21,27 +21,15 @@
under the License.
-->
- Device-management
- In &PRODUCT; 4.0.x each Nicira NVP setup is considered a "device" that can be added and removed from a physical network. To complete the configuration of the Nicira NVP plugin a device needs to be added to the physical network using the "addNiciraNVPDevice" API call. The plugin is now enabled on the physical network and any guest networks created on that network will be provisioned using the Nicira NVP Controller.
- The plugin introduces a set of new API calls to manage the devices, see below or refer to the API reference.
-
- addNiciraNvpDevice
-
-
- physicalnetworkid: the UUID of the physical network on which the device is configured
- hostname: the IP address of the NVP controller
- username: the username for access to the NVP API
- password: the password for access to the NVP API
- transportzoneuuid: the UUID of the transportzone
-
-
- deleteNiciraNVPDevice
-
-
- nvpdeviceid: the UUID of the device
-
-
- listNiciraNVPDevices
-
+ Device Management
+ In &PRODUCT; a Nicira NVP setup is considered a "device" that can be added and removed from a physical network. To complete the configuration of the Nicira NVP plugin a device needs to be added to the physical network. Press the "Add NVP Controller" button on the provider panel and enter the configuration details.
+
+
+
+
+
+ nvp-physical-network-stt.png: a screenshot of the device configuration popup.
+
+
diff --git a/docs/en-US/plugin-niciranvp-features.xml b/docs/en-US/plugin-niciranvp-features.xml
index c346bfb64e3..e439f1b4923 100644
--- a/docs/en-US/plugin-niciranvp-features.xml
+++ b/docs/en-US/plugin-niciranvp-features.xml
@@ -22,12 +22,63 @@
-->
Features of the Nicira NVP Plugin
- In &PRODUCT; release 4.0.0-incubating this plugin supports the Connectivity service. This service is responsible for creating Layer 2 networks supporting the networks created by Guests. In other words when an tenant creates a new network, instead of the traditional VLAN a logical network will be created by sending the appropriate calls to the Nicira NVP Controller.
- The plugin has been tested with Nicira NVP versions 2.1.0, 2.2.0 and 2.2.1
- In &PRODUCT; 4.0.0-incubating only the XenServer hypervisor is supported for use in
- combination with Nicira NVP.
- In &PRODUCT; 4.1.0-incubating both KVM and XenServer hypervisors are
- supported.
- In &PRODUCT; 4.0.0-incubating the UI components for this plugin are not complete,
- configuration is done by sending commands to the API.
+ The following table lists the CloudStack network services provided by the Nicira NVP Plugin.
+
+ Supported Services
+
+
+
+ Network Service
+ CloudStack version
+ NVP version
+
+
+
+
+ Virtual Networking
+ >= 4.0
+ >= 2.2.1
+
+
+ Source NAT
+ >= 4.1
+ >= 3.0.1
+
+
+ Static NAT
+ >= 4.1
+ >= 3.0.1
+
+
+ Port Forwarding
+ >= 4.1
+ >= 3.0.1
+
+
+
+
+ The Virtual Networking service was originally called 'Connectivity' in CloudStack 4.0
+ The following hypervisors are supported by the Nicira NVP Plugin.
+
+ Supported Hypervisors
+
+
+
+ Hypervisor
+ CloudStack version
+
+
+
+
+ XenServer
+ >= 4.0
+
+
+ KVM
+ >= 4.1
+
+
+
+
+ Please refer to the Nicira NVP configuration guide on how to prepare the hypervisors for Nicira NVP integration.
diff --git a/docs/en-US/plugin-niciranvp-introduction.xml b/docs/en-US/plugin-niciranvp-introduction.xml
index 9c1d42df32d..a06f12317e5 100644
--- a/docs/en-US/plugin-niciranvp-introduction.xml
+++ b/docs/en-US/plugin-niciranvp-introduction.xml
@@ -22,5 +22,8 @@
-->
Introduction to the Nicira NVP Plugin
- The Nicira NVP plugin allows CloudStack to use the Nicira solution for virtualized network as a provider for CloudStack networks and services.
+ The Nicira NVP plugin adds Nicira NVP as one of the available SDN implementations in
+ CloudStack. With the plugin an existing Nicira NVP setup can be used by CloudStack to
+ implement isolated guest networks and to provide additional services like routing and
+ NAT.
diff --git a/docs/en-US/plugin-niciranvp-networkofferings.xml b/docs/en-US/plugin-niciranvp-networkofferings.xml
new file mode 100644
index 00000000000..b30437e97ba
--- /dev/null
+++ b/docs/en-US/plugin-niciranvp-networkofferings.xml
@@ -0,0 +1,131 @@
+
+
+%BOOK_ENTITIES;
+
+%xinclude;
+]>
+
+
+ Network Offerings
+ Using the Nicira NVP plugin requires a network offering with Virtual Networking enabled and configured to use the NiciraNvp element. Typical use cases combine services from the Virtual Router appliance and the Nicira NVP plugin.
+
+ Isolated network offering with regular services from the Virtual Router.
+
+
+
+ Service
+ Provider
+
+
+
+
+ VPN
+ VirtualRouter
+
+
+ DHCP
+ VirtualRouter
+
+
+ DNS
+ VirtualRouter
+
+
+ Firewall
+ VirtualRouter
+
+
+ Load Balancer
+ VirtualRouter
+
+
+ User Data
+ VirtualRouter
+
+
+ Source NAT
+ VirtualRouter
+
+
+ Static NAT
+ VirtualRouter
+
+
+ Port Forwarding
+ VirtualRouter
+
+
+ Virtual Networking
+ NiciraNVP
+
+
+
+
+
+
+
+
+
+ nvp-physical-network-stt.png: a screenshot of a network offering.
+
+
+ The tag in the network offering should be set to the name of the physical network with the NVP provider.
+ Isolated network with network services. The virtual router is still required to provide network services like dns and dhcp.
+
+ Isolated network offering with network services
+
+
+
+ Service
+ Provider
+
+
+
+
+ DHCP
+ VirtualRouter
+
+
+ DNS
+ VirtualRouter
+
+
+ User Data
+ VirtualRouter
+
+
+ Source NAT
+ NiciraNVP
+
+
+ Static NAT
+ NiciraNVP
+
+
+ Port Forwarding
+ NiciraNVP
+
+
+ Virtual Networking
+ NiciraNVP
+
+
+
+
+
+
diff --git a/docs/en-US/plugin-niciranvp-physicalnet.xml b/docs/en-US/plugin-niciranvp-physicalnet.xml
new file mode 100644
index 00000000000..d3202905fb1
--- /dev/null
+++ b/docs/en-US/plugin-niciranvp-physicalnet.xml
@@ -0,0 +1,37 @@
+
+
+%BOOK_ENTITIES;
+
+%xinclude;
+]>
+
+
+ Zone Configuration
+ &PRODUCT; needs to have at least one physical network with the isolation method set to "STT". This network should be enabled for the Guest traffic type.
+ The Guest traffic type should be configured with the traffic label that matches the name of
+ the Integration Bridge on the hypervisor. See the Nicira NVP User Guide for more details
+ on how to set this up in XenServer or KVM.
+
+
+
+
+
+ nvp-physical-network-stt.png: a screenshot of a physical network with the STT isolation type
+
+
+
diff --git a/docs/en-US/plugin-niciranvp-preparations.xml b/docs/en-US/plugin-niciranvp-preparations.xml
index 762c941fd13..60725591fda 100644
--- a/docs/en-US/plugin-niciranvp-preparations.xml
+++ b/docs/en-US/plugin-niciranvp-preparations.xml
@@ -22,17 +22,16 @@
-->
Prerequisites
- Before enabling the Nicira NVP plugin the NVP Controller needs to be configured. Please review the NVP User Guide on how to do that.
- &PRODUCT; needs to have at least one physical network with the isolation method set to "STT". This network should be enabled for the Guest traffic type.
- The Guest traffic type should be configured with the traffic label that matches the name of
- the Integration Bridge on the hypervisor. See the Nicira NVP User Guide for more details
- on how to set this up in XenServer or KVM.
+ Before enabling the Nicira NVP plugin the NVP Controller needs to be configured. Please review the NVP User Guide on how to do that.
Make sure you have the following information ready:
The IP address of the NVP Controller
The username to access the API
The password to access the API
The UUID of the Transport Zone that contains the hypervisors in this Zone
- The UUID of the Physical Network that will be used for the Guest networks
+
+ The UUID of the Gateway Service used to provide router and NAT services.
+
+ The gateway service uuid is optional and is used for Layer 3 services only (SourceNat, StaticNat and PortForwarding)
diff --git a/docs/en-US/plugin-niciranvp-provider.xml b/docs/en-US/plugin-niciranvp-provider.xml
index 80fb2273238..8694478b483 100644
--- a/docs/en-US/plugin-niciranvp-provider.xml
+++ b/docs/en-US/plugin-niciranvp-provider.xml
@@ -22,21 +22,15 @@
-->
Enabling the service provider
- To allow CloudStack to use the Nicira NVP Plugin the network service provider needs to be enabled on the physical network. The following sequence of API calls will enable the network service provider
-
- addNetworkServiceProvider
-
-
- name = "NiciraNvp"
- physicalnetworkid = <the uuid of the physical network>
-
-
- updateNetworkServiceProvider
-
-
- id = <the provider uuid returned by the previous call>
- state = "Enabled"
-
-
-
+ The Nicira NVP provider is disabled by default. Navigate to the "Network Service Providers" configuration of the physical network with the STT isolation type. Navigate to the Nicira NVP provider and press the "Enable Provider" button.
+ CloudStack 4.0 does not have the UI interface to configure the Nicira NVP plugin. Configuration needs to be done using the API directly.
+
+
+
+
+
+ nvp-physical-network-stt.png: a screenshot of an enabled Nicira NVP provider
+
+
+
\ No newline at end of file
diff --git a/docs/en-US/plugin-niciranvp-revisions.xml b/docs/en-US/plugin-niciranvp-revisions.xml
index b8e6935c5d1..b58d3336aba 100644
--- a/docs/en-US/plugin-niciranvp-revisions.xml
+++ b/docs/en-US/plugin-niciranvp-revisions.xml
@@ -40,6 +40,20 @@
+
+ 1-0
+ Wed May 22 2013
+
+ Hugo
+ Trippaers
+ hugo@apache.org
+
+
+
+ Documentation updated for &PRODUCT; 4.1.0
+
+
+
diff --git a/docs/en-US/plugin-niciranvp-tables.xml b/docs/en-US/plugin-niciranvp-tables.xml
index 4f816550b30..615f3494c09 100644
--- a/docs/en-US/plugin-niciranvp-tables.xml
+++ b/docs/en-US/plugin-niciranvp-tables.xml
@@ -23,29 +23,84 @@
Database tables
The following tables are added to the cloud database for the Nicira NVP Plugin
-
- nicira_nvp_nic_map, contains a mapping from nic to logical switch port
-
-
- id
- logicalswitch, uuid of the logical switch this port is connected to
- logicalswitchport, uuid of the logical switch port for this nic
- nic, the CloudStack uuid for this nic, reference to the nics table
-
-
-
-
- external_nicira_nvp_devices, contains all configured devices
-
-
- id
- uuid
- physical_network_id, the physical network this device is configured on
- provider_name, set to "NiciraNvp"
- device_name, display name for this device
- host_id, reference to the host table with the device configuration
-
-
-
-
+
+ nicira_nvp_nic_map
+
+
+
+ id
+ auto incrementing id
+
+
+ logicalswitch
+ uuid of the logical switch this port is connected to
+
+
+ logicalswitchport
+ uuid of the logical switch port for this nic
+
+
+ nic
+ the &PRODUCT; uuid for this nic, reference to the nics table
+
+
+
+
+
+
+ external_nicira_nvp_devices
+
+
+
+ id
+ auto incrementing id
+
+
+ uuid
+ UUID identifying this device
+
+
+ physical_network_id
+ the physical network this device is configured on
+
+
+ provider_name
+ NiciraNVP
+
+
+ device_name
+ display name for this device
+
+
+ host_id
+ reference to the host table with the device configuration
+
+
+
+
+
+
+ nicira_nvp_router_map
+
+
+
+ id
+ auto incrementing id
+
+
+ logicalrouter_uuid
+ uuid of the logical router
+
+
+ network_id
+ id of the network this router is linked to
+
+
+
+
+
+
+ nicira_nvp_router_map is only available in &PRODUCT; 4.1 and above
+
+
\ No newline at end of file
diff --git a/docs/en-US/plugin-niciranvp-usage.xml b/docs/en-US/plugin-niciranvp-usage.xml
index 76f9a0b5b05..9f04c382bd6 100644
--- a/docs/en-US/plugin-niciranvp-usage.xml
+++ b/docs/en-US/plugin-niciranvp-usage.xml
@@ -21,10 +21,13 @@
under the License.
-->
- Using the Nicira NVP Plugin
+ Configuring the Nicira NVP Plugin
-
-
-
+
+
+
+
diff --git a/docs/en-US/plugin-niciranvp-guide.xml b/docs/en-US/plugin-niciranvp-vpc.xml
similarity index 65%
rename from docs/en-US/plugin-niciranvp-guide.xml
rename to docs/en-US/plugin-niciranvp-vpc.xml
index 89c9871021d..a43c5fa85d3 100644
--- a/docs/en-US/plugin-niciranvp-guide.xml
+++ b/docs/en-US/plugin-niciranvp-vpc.xml
@@ -1,11 +1,10 @@
-
+
%BOOK_ENTITIES;
%xinclude;
]>
-
-
- Plugin Guide for the Nicira NVP Plugin
-
-
-
+
+ Using the Nicira NVP plugin with VPC
+
+
+
+
+
diff --git a/docs/en-US/plugin-niciranvp-vpcfeatures.xml b/docs/en-US/plugin-niciranvp-vpcfeatures.xml
new file mode 100644
index 00000000000..a8d8194e9ba
--- /dev/null
+++ b/docs/en-US/plugin-niciranvp-vpcfeatures.xml
@@ -0,0 +1,28 @@
+
+
+%BOOK_ENTITIES;
+
+%xinclude;
+]>
+
+
+ Supported VPC features
+ The Nicira NVP plugin supports &PRODUCT; VPC to a certain extent. Starting with &PRODUCT; version 4.1 VPCs can be deployed using NVP isolated networks.
+ It is not possible to use a Nicira NVP Logical Router as a VPC Router
+ It is not possible to connect a private gateway using a Nicira NVP Logical Switch
+
diff --git a/docs/en-US/plugin-niciranvp-vpcnetworkoffering.xml b/docs/en-US/plugin-niciranvp-vpcnetworkoffering.xml
new file mode 100644
index 00000000000..141006ee350
--- /dev/null
+++ b/docs/en-US/plugin-niciranvp-vpcnetworkoffering.xml
@@ -0,0 +1,81 @@
+
+
+%BOOK_ENTITIES;
+
+%xinclude;
+]>
+
+
+ VPC Network Offerings
+ The VPC needs specific network offerings with the VPC flag enabled. Otherwise these network offerings are identical to regular network offerings. To allow VPC networks with a Nicira NVP isolated network the offerings need to support the Virtual Networking service with the NiciraNVP provider.
+ In a typical configuration two network offerings need to be created. One with the loadbalancing service enabled and one without loadbalancing.
+
+ VPC Network Offering with Loadbalancing
+
+
+
+ Service
+ Provider
+
+
+
+
+ VPN
+ VpcVirtualRouter
+
+
+ DHCP
+ VpcVirtualRouter
+
+
+ DNS
+ VpcVirtualRouter
+
+
+ Load Balancer
+ VpcVirtualRouter
+
+
+ User Data
+ VpcVirtualRouter
+
+
+ Source NAT
+ VpcVirtualRouter
+
+
+ Static NAT
+ VpcVirtualRouter
+
+
+ Port Forwarding
+ VpcVirtualRouter
+
+
+ NetworkACL
+ VpcVirtualRouter
+
+
+ Virtual Networking
+ NiciraNVP
+
+
+
+
+
+
diff --git a/docs/en-US/plugin-niciranvp-vpcoffering.xml b/docs/en-US/plugin-niciranvp-vpcoffering.xml
new file mode 100644
index 00000000000..292621e516c
--- /dev/null
+++ b/docs/en-US/plugin-niciranvp-vpcoffering.xml
@@ -0,0 +1,38 @@
+
+
+%BOOK_ENTITIES;
+
+%xinclude;
+]>
+
+
+ VPC Offering with Nicira NVP
+ To allow a VPC to use the Nicira NVP plugin to provision networks, a new VPC offering needs to be created which allows the Virtual Networking service to be implemented by NiciraNVP.
+ This is not currently possible with the UI. The API does provide the proper calls to create a VPC offering with Virtual Networking enabled. However due to a limitation in the 4.1 API it is not possible to select the provider for this network service. To configure the VPC offering with the NiciraNVP provider edit the database table 'vpc_offering_service_map' and change the provider to NiciraNvp for the service 'Connectivity'
+ It is also possible to update the default VPC offering by adding a row to the
+ 'vpc_offering_service_map' with service 'Connectivity' and provider 'NiciraNvp'
+
+
+
+
+
+ nvp-physical-network-stt.png: a screenshot of the mysql table.
+
+
+ When creating a new VPC offering please note that the UI does not allow you to select a VPC offering yet. The VPC needs to be created using the API with the offering UUID.
+
diff --git a/engine/schema/src/com/cloud/upgrade/dao/Upgrade410to420.java b/engine/schema/src/com/cloud/upgrade/dao/Upgrade410to420.java
index c03d377cbe0..95abe5f161e 100644
--- a/engine/schema/src/com/cloud/upgrade/dao/Upgrade410to420.java
+++ b/engine/schema/src/com/cloud/upgrade/dao/Upgrade410to420.java
@@ -75,6 +75,7 @@ public class Upgrade410to420 implements DbUpgrade {
updateNetworkACLs(conn);
addHostDetailsIndex(conn);
updateNetworksForPrivateGateways(conn);
+ removeFirewallServiceFromSharedNetworkOfferingWithSGService(conn);
}
private void updateSystemVmTemplates(Connection conn) {
@@ -747,4 +748,35 @@ public class Upgrade410to420 implements DbUpgrade {
throw new CloudRuntimeException("Failed to update private networks with VPC id.", e);
}
}
+
+ private void removeFirewallServiceFromSharedNetworkOfferingWithSGService(Connection conn) {
+ PreparedStatement pstmt = null;
+ ResultSet rs = null;
+
+ try {
+ pstmt = conn.prepareStatement("select id from `cloud`.`network_offerings` where unique_name='DefaultSharedNetworkOfferingWithSGService'");
+ rs = pstmt.executeQuery();
+ while (rs.next()) {
+ long id = rs.getLong(1);
+ // remove Firewall service for SG shared network offering
+ pstmt = conn.prepareStatement("DELETE FROM `cloud`.`ntwk_offering_service_map` where network_offering_id=? and service='Firewall'");
+ pstmt.setLong(1, id);
+ pstmt.executeUpdate();
+ }
+ } catch (SQLException e) {
+ throw new CloudRuntimeException("Unable to remove Firewall service for SG shared network offering.", e);
+ } finally {
+ try {
+ if (rs != null) {
+ rs.close();
+ }
+
+ if (pstmt != null) {
+ pstmt.close();
+ }
+ } catch (SQLException e) {
+ }
+ }
+ }
+
}
diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
index b31fb5dfbe5..1e20d75a6a0 100755
--- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
+++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
@@ -2619,8 +2619,7 @@ ServerResource {
Domain vms = null;
while (retry-- > 0) {
try {
- vms = conn.domainLookupByUUID(UUID.nameUUIDFromBytes(vmName
- .getBytes()));
+ vms = conn.domainLookupByName(vmName);
State s = convertToState(vms.getInfo().state);
return s;
} catch (final LibvirtException e) {
@@ -2712,8 +2711,7 @@ ServerResource {
try {
conn = LibvirtConnection.getConnectionByVmName(cmd.getVmName());
ifaces = getInterfaces(conn, vmName);
- dm = conn.domainLookupByUUID(UUID.nameUUIDFromBytes(vmName
- .getBytes()));
+ dm = conn.domainLookupByName(vmName);
dconn = new Connect("qemu+tcp://" + cmd.getDestinationIp()
+ "/system");
/*
@@ -2728,6 +2726,9 @@ ServerResource {
} finally {
try {
if (dm != null) {
+ if (dm.isPersistent() == 1) {
+ dm.undefine();
+ }
dm.free();
}
if (dconn != null) {
@@ -3159,8 +3160,7 @@ ServerResource {
protected LibvirtVMDef createVMFromSpec(VirtualMachineTO vmTO) {
LibvirtVMDef vm = new LibvirtVMDef();
vm.setDomainName(vmTO.getName());
- vm.setDomUUID(UUID.nameUUIDFromBytes(vmTO.getName().getBytes())
- .toString());
+ vm.setDomUUID(vmTO.getUuid());
vm.setDomDescription(vmTO.getOs());
GuestDef guest = new GuestDef();
@@ -3584,8 +3584,7 @@ ServerResource {
KVMStoragePool attachingPool = attachingDisk.getPool();
try {
if (!attach) {
- dm = conn.domainLookupByUUID(UUID.nameUUIDFromBytes(vmName
- .getBytes()));
+ dm = conn.domainLookupByName(vmName);
LibvirtDomainXMLParser parser = new LibvirtDomainXMLParser();
String xml = dm.getXMLDesc(0);
parser.parseDomainXML(xml);
@@ -3634,9 +3633,7 @@ ServerResource {
InternalErrorException {
Domain dm = null;
try {
- dm = conn.domainLookupByUUID(UUID.nameUUIDFromBytes((vmName
- .getBytes())));
-
+ dm = conn.domainLookupByName(vmName);
if (attach) {
s_logger.debug("Attaching device: " + xml);
dm.attachDevice(xml);
@@ -3867,8 +3864,7 @@ ServerResource {
for (; i < 5; i++) {
try {
Connect conn = LibvirtConnection.getConnectionByVmName(vm);
- dm = conn.domainLookupByUUID(UUID.nameUUIDFromBytes(vm
- .getBytes()));
+ dm = conn.domainLookupByName(vm);
DomainInfo.DomainState vps = dm.getInfo().state;
if (vps != null
&& vps != DomainInfo.DomainState.VIR_DOMAIN_SHUTOFF
@@ -4005,8 +4001,7 @@ ServerResource {
for (int i = 0; i < vms.length; i++) {
try {
- dm = conn.domainLookupByUUID(UUID.nameUUIDFromBytes(vms[i]
- .getBytes()));
+ dm = conn.domainLookupByName(vms[i]);
DomainInfo.DomainState ps = dm.getInfo().state;
final State state = convertToState(ps);
@@ -4111,8 +4106,7 @@ ServerResource {
Domain dm = null;
String msg = null;
try {
- dm = conn.domainLookupByUUID(UUID.nameUUIDFromBytes(vmName
- .getBytes()));
+ dm = conn.domainLookupByName(vmName);
String vmDef = dm.getXMLDesc(0);
s_logger.debug(vmDef);
msg = stopVM(conn, vmName);
@@ -4154,8 +4148,7 @@ ServerResource {
/* Retry 3 times, to make sure we can get the vm's status */
for (int i = 0; i < 3; i++) {
try {
- dm = conn.domainLookupByUUID(UUID.nameUUIDFromBytes(vmName
- .getBytes()));
+ dm = conn.domainLookupByName(vmName);
state = dm.getInfo().state;
break;
} catch (LibvirtException e) {
@@ -4191,8 +4184,7 @@ ServerResource {
protected String stopVM(Connect conn, String vmName, boolean force) {
Domain dm = null;
try {
- dm = conn.domainLookupByUUID(UUID.nameUUIDFromBytes(vmName
- .getBytes()));
+ dm = conn.domainLookupByName(vmName);
int persist = dm.isPersistent();
if (force) {
if (dm.isActive() == 1) {
@@ -4279,8 +4271,7 @@ ServerResource {
LibvirtDomainXMLParser parser = new LibvirtDomainXMLParser();
Domain dm = null;
try {
- dm = conn.domainLookupByUUID(UUID.nameUUIDFromBytes(vmName
- .getBytes()));
+ dm = conn.domainLookupByName(vmName);
String xmlDesc = dm.getXMLDesc(0);
parser.parseDomainXML(xmlDesc);
return parser.getVncPort();
@@ -4325,8 +4316,7 @@ ServerResource {
LibvirtDomainXMLParser parser = new LibvirtDomainXMLParser();
Domain dm = null;
try {
- dm = conn.domainLookupByUUID(UUID.nameUUIDFromBytes(vmName
- .getBytes()));
+ dm = conn.domainLookupByName(vmName);
String xmlDesc = dm.getXMLDesc(0);
parser.parseDomainXML(xmlDesc);
return parser.getDescription();
@@ -4424,15 +4414,14 @@ ServerResource {
private Domain getDomain(Connect conn, String vmName)
throws LibvirtException {
return conn
- .domainLookupByUUID(UUID.nameUUIDFromBytes(vmName.getBytes()));
+ .domainLookupByName(vmName);
}
protected List getInterfaces(Connect conn, String vmName) {
LibvirtDomainXMLParser parser = new LibvirtDomainXMLParser();
Domain dm = null;
try {
- dm = conn.domainLookupByUUID(UUID.nameUUIDFromBytes(vmName
- .getBytes()));
+ dm = conn.domainLookupByName(vmName);
parser.parseDomainXML(dm.getXMLDesc(0));
return parser.getInterfaces();
@@ -4454,8 +4443,7 @@ ServerResource {
LibvirtDomainXMLParser parser = new LibvirtDomainXMLParser();
Domain dm = null;
try {
- dm = conn.domainLookupByUUID(UUID.nameUUIDFromBytes(vmName
- .getBytes()));
+ dm = conn.domainLookupByName(vmName);
parser.parseDomainXML(dm.getXMLDesc(0));
return parser.getDisks();
diff --git a/plugins/hypervisors/kvm/test/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java b/plugins/hypervisors/kvm/test/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java
index 39e36d65c65..0bafd073f68 100644
--- a/plugins/hypervisors/kvm/test/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java
+++ b/plugins/hypervisors/kvm/test/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java
@@ -60,6 +60,7 @@ public class LibvirtComputingResourceTest {
LibvirtComputingResource lcr = new LibvirtComputingResource();
VirtualMachineTO to = new VirtualMachineTO(id, name, VirtualMachine.Type.User, cpus, speed, minRam, maxRam, BootloaderType.HVM, os, false, false, vncPassword);
to.setVncAddr(vncAddr);
+ to.setUuid("b0f0a72d-7efb-3cad-a8ff-70ebf30b3af9");
LibvirtVMDef vm = lcr.createVMFromSpec(to);
vm.setHvsType(_hyperVisorType);
@@ -135,6 +136,7 @@ public class LibvirtComputingResourceTest {
LibvirtComputingResource lcr = new LibvirtComputingResource();
VirtualMachineTO to = new VirtualMachineTO(id, name, VirtualMachine.Type.User, cpus, minSpeed, maxSpeed, minRam, maxRam, BootloaderType.HVM, os, false, false, vncPassword);
to.setVncAddr(vncAddr);
+ to.setUuid("b0f0a72d-7efb-3cad-a8ff-70ebf30b3af9");
LibvirtVMDef vm = lcr.createVMFromSpec(to);
vm.setHvsType(_hyperVisorType);
@@ -181,4 +183,4 @@ public class LibvirtComputingResourceTest {
assertEquals(vmStr, vm.toString());
}
-}
\ No newline at end of file
+}
diff --git a/server/src/com/cloud/api/query/QueryManagerImpl.java b/server/src/com/cloud/api/query/QueryManagerImpl.java
index a126925e5f0..c586a7b19f2 100644
--- a/server/src/com/cloud/api/query/QueryManagerImpl.java
+++ b/server/src/com/cloud/api/query/QueryManagerImpl.java
@@ -1095,7 +1095,7 @@ public class QueryManagerImpl extends ManagerBase implements QueryService {
}
if (networkId != null) {
- sc.setJoinParameters("nicSearch", "networkId", networkId);
+ sc.setParameters("networkId", networkId);
}
if (vpcId != null) {
diff --git a/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java b/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java
index c86d5e1a1b2..795b526c403 100644
--- a/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java
+++ b/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java
@@ -380,7 +380,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
if (planner instanceof DeploymentClusterPlanner) {
- ExcludeList PlannerAvoidInput = new ExcludeList(avoids.getDataCentersToAvoid(),
+ ExcludeList plannerAvoidInput = new ExcludeList(avoids.getDataCentersToAvoid(),
avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid(),
avoids.getPoolsToAvoid());
@@ -388,19 +388,19 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
if (clusterList != null && !clusterList.isEmpty()) {
// planner refactoring. call allocators to list hosts
- ExcludeList PlannerAvoidOutput = new ExcludeList(avoids.getDataCentersToAvoid(),
+ ExcludeList plannerAvoidOutput = new ExcludeList(avoids.getDataCentersToAvoid(),
avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid(),
avoids.getPoolsToAvoid());
- resetAvoidSet(PlannerAvoidOutput, PlannerAvoidInput);
+ resetAvoidSet(plannerAvoidOutput, plannerAvoidInput);
dest = checkClustersforDestination(clusterList, vmProfile, plan, avoids, dc,
- getPlannerUsage(planner), PlannerAvoidOutput);
+ getPlannerUsage(planner), plannerAvoidOutput);
if (dest != null) {
return dest;
}
// reset the avoid input to the planners
- resetAvoidSet(avoids, PlannerAvoidOutput);
+ resetAvoidSet(avoids, plannerAvoidOutput);
} else {
return null;
@@ -815,12 +815,8 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
// remove any hosts/pools that the planners might have added
// to get the list of hosts/pools that Allocators flagged as 'avoid'
- if (allocatorAvoidOutput.getHostsToAvoid() != null && plannerAvoidOutput.getHostsToAvoid() != null) {
- allocatorAvoidOutput.getHostsToAvoid().removeAll(plannerAvoidOutput.getHostsToAvoid());
- }
- if (allocatorAvoidOutput.getPoolsToAvoid() != null && plannerAvoidOutput.getPoolsToAvoid() != null) {
- allocatorAvoidOutput.getPoolsToAvoid().removeAll(plannerAvoidOutput.getPoolsToAvoid());
- }
+
+ resetAvoidSet(allocatorAvoidOutput, plannerAvoidOutput);
// if all hosts or all pools in the cluster are in avoid set after this
// pass, then put the cluster in avoid set.
@@ -829,8 +825,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
List allhostsInCluster = _hostDao.listAllUpAndEnabledNonHAHosts(Host.Type.Routing, clusterVO.getId(),
clusterVO.getPodId(), clusterVO.getDataCenterId(), null);
for (HostVO host : allhostsInCluster) {
- if (allocatorAvoidOutput.getHostsToAvoid() == null
- || !allocatorAvoidOutput.getHostsToAvoid().contains(host.getId())) {
+ if (!allocatorAvoidOutput.shouldAvoid(host)) {
// there's some host in the cluster that is not yet in avoid set
avoidAllHosts = false;
}
@@ -839,8 +834,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
List allPoolsInCluster = _storagePoolDao.findPoolsByTags(clusterVO.getDataCenterId(),
clusterVO.getPodId(), clusterVO.getId(), null);
for (StoragePoolVO pool : allPoolsInCluster) {
- if (allocatorAvoidOutput.getPoolsToAvoid() == null
- || !allocatorAvoidOutput.getPoolsToAvoid().contains(pool.getId())) {
+ if (!allocatorAvoidOutput.shouldAvoid(pool)) {
// there's some pool in the cluster that is not yet in avoid set
avoidAllPools = false;
}
diff --git a/server/src/com/cloud/network/NetworkManagerImpl.java b/server/src/com/cloud/network/NetworkManagerImpl.java
index 0f43b87685e..254510b15a9 100755
--- a/server/src/com/cloud/network/NetworkManagerImpl.java
+++ b/server/src/com/cloud/network/NetworkManagerImpl.java
@@ -3273,7 +3273,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L
// the code would be triggered
s_logger.debug("Cleaning up remote access vpns as a part of public IP id=" + ipId + " release...");
try {
- _vpnMgr.destroyRemoteAccessVpn(ipId, caller);
+ _vpnMgr.destroyRemoteAccessVpnForIp(ipId, caller);
} catch (ResourceUnavailableException e) {
s_logger.warn("Unable to destroy remote access vpn for ip id=" + ipId + " as a part of ip release", e);
success = false;
diff --git a/server/src/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java b/server/src/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java
index 062743b23af..9e7bb13b867 100755
--- a/server/src/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java
+++ b/server/src/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java
@@ -226,10 +226,10 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc
}
@Override @DB
- public void destroyRemoteAccessVpn(long ipId, Account caller) throws ResourceUnavailableException {
+ public void destroyRemoteAccessVpnForIp(long ipId, Account caller) throws ResourceUnavailableException {
RemoteAccessVpnVO vpn = _remoteAccessVpnDao.findByPublicIpAddress(ipId);
if (vpn == null) {
- s_logger.debug("vpn id=" + ipId + " does not exists ");
+ s_logger.debug("there are no remote access vpns for public ip address id=" + ipId);
return;
}
diff --git a/server/src/com/cloud/user/AccountManagerImpl.java b/server/src/com/cloud/user/AccountManagerImpl.java
index aac8d19eb0e..7421422d294 100755
--- a/server/src/com/cloud/user/AccountManagerImpl.java
+++ b/server/src/com/cloud/user/AccountManagerImpl.java
@@ -628,7 +628,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M
try {
for (RemoteAccessVpnVO vpn : remoteAccessVpns) {
- _remoteAccessVpnMgr.destroyRemoteAccessVpn(vpn.getServerAddressId(), caller);
+ _remoteAccessVpnMgr.destroyRemoteAccessVpnForIp(vpn.getServerAddressId(), caller);
}
} catch (ResourceUnavailableException ex) {
s_logger.warn("Failed to cleanup remote access vpn resources as a part of account id=" + accountId + " cleanup due to Exception: ", ex);
diff --git a/setup/db/db/schema-302to40.sql b/setup/db/db/schema-302to40.sql
index f17f067c6ef..7fa73483db6 100644
--- a/setup/db/db/schema-302to40.sql
+++ b/setup/db/db/schema-302to40.sql
@@ -134,7 +134,7 @@ ALTER TABLE `cloud`.`account` ADD COLUMN `default_zone_id` bigint unsigned;
ALTER TABLE `cloud`.`account` ADD CONSTRAINT `fk_account__default_zone_id` FOREIGN KEY `fk_account__default_zone_id`(`default_zone_id`) REFERENCES `data_center`(`id`) ON DELETE CASCADE;
-DELETE FROM `cloud`.`storage_pool_host_ref` WHERE pool_id IN (SELECT id FROM storage_pool WHERE removed IS NOT NULL);
+DELETE FROM `cloud`.`storage_pool_host_ref` WHERE pool_id IN (SELECT id FROM `cloud`.`storage_pool` WHERE removed IS NOT NULL);
DROP TABLE IF EXISTS `cloud`.`cluster_vsm_map`;
DROP TABLE IF EXISTS `cloud`.`virtual_supervisor_module`;
@@ -179,14 +179,14 @@ CREATE TABLE `cloud`.`port_profile` (
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-DELETE FROM `cloud`.`storage_pool_host_ref` WHERE pool_id IN (SELECT id FROM storage_pool WHERE removed IS NOT NULL);
+DELETE FROM `cloud`.`storage_pool_host_ref` WHERE pool_id IN (SELECT id FROM `cloud`.`storage_pool` WHERE removed IS NOT NULL);
ALTER TABLE `cloud`.`service_offering` MODIFY `nw_rate` smallint(5) unsigned DEFAULT '200' COMMENT 'network rate throttle mbits/s';
-- RBD Primary Storage pool support (commit: 406fd95d87bfcdbb282d65589ab1fb6e9fd0018a)
-ALTER TABLE `storage_pool` ADD `user_info` VARCHAR( 255 ) NULL COMMENT 'Authorization information for the storage pool. Used by network filesystems' AFTER `host_address`;
+ALTER TABLE `cloud`.`storage_pool` ADD `user_info` VARCHAR( 255 ) NULL COMMENT 'Authorization information for the storage pool. Used by network filesystems' AFTER `host_address`;
-- Resource tags (commit: 62d45b9670520a1ee8b520509393d4258c689b50)
CREATE TABLE `cloud`.`resource_tags` (
@@ -232,9 +232,9 @@ CREATE TABLE `cloud`.`nicira_nvp_nic_map` (
-- Remove the unique constraint on physical_network_id, provider_name from physical_network_service_providers
-- Because the name of this contraint is not set we need this roundabout way
-- The key is also used by the foreign key constraint so drop and recreate that one
-ALTER TABLE physical_network_service_providers DROP FOREIGN KEY fk_pnetwork_service_providers__physical_network_id;
+ALTER TABLE `cloud`.`physical_network_service_providers` DROP FOREIGN KEY fk_pnetwork_service_providers__physical_network_id;
-SET @constraintname = (select CONCAT(CONCAT('DROP INDEX ', A.CONSTRAINT_NAME), ' ON physical_network_service_providers' )
+SET @constraintname = (select CONCAT(CONCAT('DROP INDEX ', A.CONSTRAINT_NAME), ' ON cloud.physical_network_service_providers' )
from information_schema.key_column_usage A
JOIN information_schema.key_column_usage B ON B.table_name = 'physical_network_service_providers' AND B.COLUMN_NAME = 'provider_name' AND A.COLUMN_NAME ='physical_network_id' AND B.CONSTRAINT_NAME=A.CONSTRAINT_NAME
where A.table_name = 'physical_network_service_providers' LIMIT 1);
@@ -243,7 +243,7 @@ PREPARE stmt1 FROM @constraintname;
EXECUTE stmt1;
DEALLOCATE PREPARE stmt1;
-AlTER TABLE physical_network_service_providers ADD CONSTRAINT `fk_pnetwork_service_providers__physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE;
+ALTER TABLE `cloud`.`physical_network_service_providers` ADD CONSTRAINT `fk_pnetwork_service_providers__physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `cloud`.`physical_network`(`id`) ON DELETE CASCADE;
UPDATE `cloud`.`configuration` SET description='In second, timeout for creating volume from snapshot' WHERE name='create.volume.from.snapshot.wait';
ALTER TABLE `cloud`.`data_center` ADD COLUMN `is_local_storage_enabled` tinyint NOT NULL DEFAULT 0 COMMENT 'Is local storage offering enabled for this data center; 1: enabled, 0: not';
diff --git a/setup/db/db/schema-40to410.sql b/setup/db/db/schema-40to410.sql
index b7b1c7a91dd..381a4cea612 100644
--- a/setup/db/db/schema-40to410.sql
+++ b/setup/db/db/schema-40to410.sql
@@ -1639,3 +1639,15 @@ INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Usage', 'DEFAULT', 'manageme
INSERT IGNORE INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (163, UUID(), 10, 'Ubuntu 12.04 (32-bit)');
INSERT IGNORE INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (164, UUID(), 10, 'Ubuntu 12.04 (64-bit)');
+
+DROP TABLE IF EXISTS `cloud`.`netscaler_pod_ref`;
+CREATE TABLE `cloud`.`netscaler_pod_ref` (
+ `id` bigint unsigned NOT NULL auto_increment COMMENT 'id',
+ `external_load_balancer_device_id` bigint unsigned NOT NULL COMMENT 'id of external load balancer device',
+ `pod_id` bigint unsigned NOT NULL COMMENT 'pod id',
+ PRIMARY KEY (`id`),
+ CONSTRAINT `fk_ns_pod_ref__pod_id` FOREIGN KEY (`pod_id`) REFERENCES `cloud`.`host_pod_ref`(`id`) ON DELETE CASCADE,
+ CONSTRAINT `fk_ns_pod_ref__device_id` FOREIGN KEY (`external_load_balancer_device_id`) REFERENCES `cloud`.`external_load_balancer_devices`(`id`) ON DELETE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'eip.use.multiple.netscalers', 'false', 'Should be set to true if there will be multiple NetScaler devices providing EIP service in a zone');
diff --git a/setup/db/db/schema-410to420.sql b/setup/db/db/schema-410to420.sql
index 7b5e9cb5f29..b5480359edd 100644
--- a/setup/db/db/schema-410to420.sql
+++ b/setup/db/db/schema-410to420.sql
@@ -25,8 +25,8 @@ SET foreign_key_checks = 0;
ALTER TABLE `cloud`.`hypervisor_capabilities` ADD COLUMN `max_hosts_per_cluster` int unsigned DEFAULT NULL COMMENT 'Max. hosts in cluster supported by hypervisor';
ALTER TABLE `cloud`.`hypervisor_capabilities` ADD COLUMN `storage_motion_supported` int(1) unsigned DEFAULT 0 COMMENT 'Is storage motion supported';
UPDATE `cloud`.`hypervisor_capabilities` SET `max_hosts_per_cluster`=32 WHERE `hypervisor_type`='VMware';
-INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_data_volumes_limit, storage_motion_supported) VALUES ('XenServer', '6.1.0', 50, 1, 13, 1);
-INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_hosts_per_cluster) VALUES ('VMware', '5.1', 128, 0, 32);
+INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_data_volumes_limit, storage_motion_supported) VALUES (UUID(), 'XenServer', '6.1.0', 50, 1, 13, 1);
+INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_hosts_per_cluster) VALUES (UUID(), 'VMware', '5.1', 128, 0, 32);
DELETE FROM `cloud`.`configuration` where name='vmware.percluster.host.max';
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'AgentManager', 'xen.nics.max', '7', 'Maximum allowed nics for Vms created on Xen');
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Network', 'DEFAULT', 'management-server', 'midonet.apiserver.address', 'http://localhost:8081', 'Specify the address at which the Midonet API server can be contacted (if using Midonet)');
@@ -348,7 +348,7 @@ ALTER TABLE `cloud`.`remote_access_vpn` ADD COLUMN `uuid` varchar(40) UNIQUE;
-- START: support for LXC
-INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES ('LXC', 'default', 50, 1);
+INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES (UUID(), 'LXC', 'default', 50, 1);
ALTER TABLE `cloud`.`physical_network_traffic_types` ADD COLUMN `lxc_network_label` varchar(255) DEFAULT 'cloudbr0' COMMENT 'The network name label of the physical device dedicated to this traffic on a LXC host';
UPDATE configuration SET value='KVM,XenServer,VMware,BareMetal,Ovm,LXC' WHERE name='hypervisor.list';
@@ -1710,5 +1710,5 @@ INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'VpcMa
-- Re-enable foreign key checking, at the end of the upgrade path
SET foreign_key_checks = 1;
-
+UPDATE `cloud`.`snapshot_policy` set uuid=id WHERE uuid is NULL;
diff --git a/test/integration/smoke/test_nic.py b/test/integration/smoke/test_nic.py
index bae6dfda15d..8e8d3407dfb 100644
--- a/test/integration/smoke/test_nic.py
+++ b/test/integration/smoke/test_nic.py
@@ -79,11 +79,35 @@ class Services:
"PortForwarding": 'VirtualRouter',
},
},
+ "network_offering_shared": {
+ "name": 'Test Network offering shared',
+ "displaytext": 'Test Network offering Shared',
+ "guestiptype": 'Shared',
+ "supportedservices": 'Dhcp,Dns,UserData',
+ "traffictype": 'GUEST',
+ "specifyVlan" : "True",
+ "specifyIpRanges" : "True",
+ "serviceProviderList" : {
+ "Dhcp": 'VirtualRouter',
+ "Dns": 'VirtualRouter',
+ "UserData": 'VirtualRouter',
+ },
+ },
"network": {
"name": "Test Network",
"displaytext": "Test Network",
"acltype": "Account",
},
+ "network2": {
+ "name": "Test Network Shared",
+ "displaytext": "Test Network Shared",
+ "vlan" :1201,
+ "gateway" :"172.16.15.1",
+ "netmask" :"255.255.255.0",
+ "startip" :"172.16.15.21",
+ "endip" :"172.16.15.41",
+ "acltype": "Account",
+ },
# ISO settings for Attach/Detach ISO tests
"iso": {
"displaytext": "Test ISO",
@@ -176,6 +200,14 @@ class TestDeployVM(cloudstackTestCase):
self.network_offering.update(self.apiclient, state='Enabled') # Enable Network offering
self.services["network"]["networkoffering"] = self.network_offering.id
+ self.network_offering_shared = NetworkOffering.create(
+ self.apiclient,
+ self.services["network_offering_shared"],
+ )
+ self.cleanup.insert(0, self.network_offering_shared)
+ self.network_offering_shared.update(self.apiclient, state='Enabled') # Enable Network offering
+ self.services["network2"]["networkoffering"] = self.network_offering_shared.id
+
################
### Test Network
self.test_network = Network.create(
@@ -185,6 +217,14 @@ class TestDeployVM(cloudstackTestCase):
self.account.domainid,
)
self.cleanup.insert(0, self.test_network)
+ self.test_network2 = Network.create(
+ self.apiclient,
+ self.services["network2"],
+ self.account.name,
+ self.account.domainid,
+ zoneid=self.services["network"]["zoneid"]
+ )
+ self.cleanup.insert(0, self.test_network2)
except Exception as ex:
self.debug("Exception during NIC test SETUP!: " + str(ex))
self.assertEqual(True, False, "Exception during NIC test SETUP!: " + str(ex))
@@ -201,10 +241,10 @@ class TestDeployVM(cloudstackTestCase):
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
- mode=self.services['mode']
+ mode=self.services['mode'],
+ networkids=[self.test_network.id]
)
self.cleanup.insert(0, self.virtual_machine)
-
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
@@ -256,7 +296,7 @@ class TestDeployVM(cloudstackTestCase):
existing_nic_id = vm_response.nic[0].id
# 1. add a nic
- add_response = self.virtual_machine.add_nic(self.apiclient, self.test_network.id)
+ add_response = self.virtual_machine.add_nic(self.apiclient, self.test_network2.id)
time.sleep(5)
# now go get the vm list?
@@ -308,8 +348,9 @@ class TestDeployVM(cloudstackTestCase):
sawException = True
self.assertEqual(sawException, True, "Make sure we cannot delete the default NIC")
-
- self.virtual_machine.remove_nic(self.apiclient, existing_nic_id)
+ self.virtual_machine.update_default_nic(self.apiclient, existing_nic_id)
+ time.sleep(5)
+ self.virtual_machine.remove_nic(self.apiclient, new_nic_id)
time.sleep(5)
list_vm_response = list_virtual_machines(
diff --git a/ui/scripts/network.js b/ui/scripts/network.js
index 6b310ce0e83..3eef1367e97 100755
--- a/ui/scripts/network.js
+++ b/ui/scripts/network.js
@@ -50,7 +50,7 @@
nicId: nic.id
},
success: function(json) {
- var nic = json.listnics.nic[0];
+ var nic = json.listnicsresponse.nic[0];
var ips = nic.secondaryip ? nic.secondaryip : [];
var ipSelection = [];