mirror of https://github.com/apache/cloudstack.git
Merge branch '3.0.x' of ssh://git.cloud.com/var/lib/git/cloudstack-oss into 3.0.x
This change is contained in commit e90e5e9c8c.
@ -46,6 +46,7 @@ public class EC2InstanceFilterSet {
        filterTypes.put( "owner-id", "string" );
        filterTypes.put( "root-device-name", "string" );
        filterTypes.put( "private-ip-address", "string" );
        filterTypes.put( "group-id", "string" );
    }
@ -154,6 +155,13 @@ public class EC2InstanceFilterSet {
        {
            return containsDevice( vm.getRootDeviceId(), valueSet );
        }
        else if (filterName.equalsIgnoreCase( "group-id"))
        {
            String[] groupSet = vm.getGroupSet();
            for (String group : groupSet)
                if (containsString(group, valueSet)) return true;
            return false;
        }
        else return false;
    }
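For context, the new "group-id" branch above simply checks each of the VM's security group names against the filter's value set via containsString(). A minimal sketch of what such a membership helper could look like (hypothetical; the real helper already exists elsewhere in EC2InstanceFilterSet and may also apply EC2-style wildcard matching):

    // Hypothetical sketch only -- illustrates the membership test used by the group-id filter.
    // The actual containsString() in EC2InstanceFilterSet may use different matching semantics.
    private boolean containsString(String value, String[] valueSet) {
        if (value == null || valueSet == null) {
            return false;
        }
        for (String candidate : valueSet) {
            if (value.equals(candidate)) {   // exact match; wildcard handling omitted in this sketch
                return true;
            }
        }
        return false;
    }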
@ -114,7 +114,7 @@ label.corrections.saved=Corrections saved
|
|||
message.installWizard.copy.whatIsSecondaryStorage=Secondary storage is associated with a zone, and it stores the following:<ul><li>Templates - OS images that can be used to boot VMs and can include additional configuration information, such as installed applications</li><li>ISO images - OS images that can be bootable or non-bootable</li><li>Disk volume snapshots - saved copies of VM data which can be used for data recovery or to create new templates</ul>
|
||||
message.installWizard.copy.whatIsPrimaryStorage=A CloudStack™ cloud infrastructure makes use of two types of storage: primary storage and secondary storage. Both of these can be iSCSI or NFS servers, or localdisk.<br/><br/><strong>Primary storage</strong> is associated with a cluster, and it stores the disk volumes of each guest VM for all the VMs running on hosts in that cluster. The primary storage server is typically located close to the hosts.
|
||||
message.installWizard.copy.whatIsACluster=A cluster provides a way to group hosts. The hosts in a cluster all have identical hardware, run the same hypervisor, are on the same subnet, and access the same shared storage. Virtual machine instances (VMs) can be live-migrated from one host to another within the same cluster, without interrupting service to the user. A cluster is the third-largest organizational unit within a CloudStack™ deployment. Clusters are contained within pods, and pods are contained within zones.<br/><br/>CloudStack™ allows multiple clusters in a cloud deployment, but for a Basic Installation, we only need one cluster.
|
||||
message.installWizard.copy.whatIsAPod=A pod often represents a single rack. Hosts in the same pod are in the same subnet.<br/><br/>A pod is the second-largest organizational unit within a CloudStack™ deployment. Pods are contained within zones. Each zone can contain one or more pods; in the Basic Installation, you will have just one pod in your zone
|
||||
message.installWizard.copy.whatIsAPod=A pod often represents a single rack. Hosts in the same pod are in the same subnet.<br/><br/>A pod is the second-largest organizational unit within a CloudStack™ deployment. Pods are contained within zones. Each zone can contain one or more pods; in the Basic Installation, you will have just one pod in your zone.
|
||||
message.installWizard.copy.whatIsAZone=A zone is the largest organizational unit within a CloudStack™ deployment. A zone typically corresponds to a single datacenter, although it is permissible to have multiple zones in a datacenter. The benefit of organizing infrastructure into zones is to provide physical isolation and redundancy. For example, each zone can have its own power supply and network uplink, and the zones can be widely separated geographically (though this is not required).
|
||||
message.installWizard.copy.whatIsCloudStack=CloudStack™ is a software platform that pools computing resources to build public, private, and hybrid Infrastructure as a Service (IaaS) clouds. CloudStack™ manages the network, storage, and compute nodes that make up a cloud infrastructure. Use CloudStack™ to deploy, manage, and configure cloud computing environments.<br/><br/>Extending beyond individual virtual machine images running on commodity hardware, CloudStack™ provides a turnkey cloud infrastructure software stack for delivering virtual datacenters as a service - delivering all of the essential components to build, deploy, and manage multi-tier and multi-tenant cloud applications. Both open-source and Premium versions are available, with the open-source version offering nearly identical features.
|
||||
message.installWizard.tooltip.addSecondaryStorage.path=The exported path, located on the server you specified above
|
||||
|
|
@ -504,7 +504,7 @@ label.add.resources=Add Resources
|
|||
label.launch=Launch
|
||||
label.set.up.zone.type=Set up zone type
|
||||
message.please.select.a.configuration.for.your.zone=Please select a configuration for your zone.
|
||||
message.desc.basic.zone=Provide a single network where each VM instance is assigned an IP directly from the network. Guest isolation can be provided through layer-3 means such as security groups (IP address source filtering)
|
||||
message.desc.basic.zone=Provide a single network where each VM instance is assigned an IP directly from the network. Guest isolation can be provided through layer-3 means such as security groups (IP address source filtering).
|
||||
label.basic=Basic
|
||||
message.desc.advanced.zone=For more sophisticated network topologies. This network model provides the most flexibility in defining guest networks and providing custom network offerings such as firewall, VPN, or load balancer support.
|
||||
label.advanced=Advanced
|
||||
|
|
|
|||
|
|
@ -5,8 +5,4 @@
/etc/init.d/cloud-agent
/usr/bin/agent-runner
/usr/bin/cloud-setup-agent
/usr/lib/cloud/agent/css
/usr/lib/cloud/agent/ui
/usr/lib/cloud/agent/js
/usr/lib/cloud/agent/images
/var/log/cloud/agent
@ -17,9 +17,14 @@ package com.cloud.upgrade.dao;
 */
import java.io.File;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.UUID;

import org.apache.log4j.Logger;
//
import com.cloud.dc.DataCenter.NetworkType;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script;
@ -53,6 +58,195 @@ public class Upgrade302to303 implements DbUpgrade {
|
|||
|
||||
@Override
|
||||
public void performDataMigration(Connection conn) {
|
||||
setupExternalNetworkDevices(conn);
|
||||
}
|
||||
|
||||
private void setupExternalNetworkDevices(Connection conn) {
|
||||
PreparedStatement dcSearchStmt, pNetworkStmt, devicesStmt = null;
|
||||
ResultSet dcResults, pNetworksResults, devicesResult = null;
|
||||
|
||||
try {
|
||||
dcSearchStmt = conn.prepareStatement("SELECT id, networktype FROM `cloud`.`data_center`");
|
||||
dcResults = dcSearchStmt.executeQuery();
|
||||
while (dcResults.next()) {
|
||||
long zoneId = dcResults.getLong(1);
|
||||
long f5HostId = 0;
|
||||
long srxHostId = 0;
|
||||
|
||||
String networkType = dcResults.getString(2);
|
||||
if (NetworkType.Advanced.toString().equalsIgnoreCase(networkType)) {
|
||||
|
||||
devicesStmt = conn.prepareStatement("SELECT id, type FROM host WHERE data_center_id=? AND (type = 'ExternalLoadBalancer' OR type = 'ExternalFirewall')");
|
||||
devicesStmt.setLong(1, zoneId);
|
||||
devicesResult = devicesStmt.executeQuery();
|
||||
|
||||
while (devicesResult.next()) {
|
||||
String device = devicesResult.getString(2);
|
||||
if (device.equals("ExternalLoadBalancer")) {
|
||||
f5HostId = devicesResult.getLong(1);
|
||||
} else if (device.equals("ExternalFirewall")) {
|
||||
srxHostId = devicesResult.getLong(1);
|
||||
}
|
||||
}
|
||||
|
||||
// check if the deployment had F5 and SRX devices
|
||||
if (f5HostId != 0 && srxHostId != 0) {
|
||||
pNetworkStmt = conn.prepareStatement("SELECT id FROM `cloud`.`physical_network` where data_center_id=?");
|
||||
pNetworkStmt.setLong(1, zoneId);
|
||||
pNetworksResults = pNetworkStmt.executeQuery();
|
||||
if (pNetworksResults.first()) {
|
||||
long physicalNetworkId = pNetworksResults.getLong(1);
|
||||
|
||||
// add F5BigIP provider and provider instance to physical network
|
||||
addF5ServiceProvider(conn, physicalNetworkId, zoneId);
|
||||
addF5LoadBalancer(conn, f5HostId, physicalNetworkId);
|
||||
|
||||
// add SRX provider and provider instance to physical network
|
||||
addSrxServiceProvider(conn, physicalNetworkId, zoneId);
|
||||
addSrxFirewall(conn, srxHostId, physicalNetworkId);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (dcResults != null) {
|
||||
try {
|
||||
dcResults.close();
|
||||
} catch (SQLException e) {
|
||||
}
|
||||
}
|
||||
if (dcSearchStmt != null) {
|
||||
try {
|
||||
dcSearchStmt.close();
|
||||
} catch (SQLException e) {
|
||||
}
|
||||
}
|
||||
} catch (SQLException e) {
|
||||
throw new CloudRuntimeException("Exception while adding PhysicalNetworks", e);
|
||||
} finally {
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
private void addF5LoadBalancer(Connection conn, long hostId, long physicalNetworkId){
|
||||
// add traffic types
|
||||
PreparedStatement pstmtUpdate = null;
|
||||
try{
|
||||
s_logger.debug("Adding F5 Big IP load balancer with host id " + hostId);
|
||||
String insertF5 = "INSERT INTO `cloud`.`external_load_balancer_devices` (physical_network_id, host_id, provider_name, " +
|
||||
"device_name, capacity, is_dedicated, device_state, allocation_state, is_inline, is_managed, uuid) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
|
||||
pstmtUpdate = conn.prepareStatement(insertF5);
|
||||
pstmtUpdate.setLong(1, physicalNetworkId);
|
||||
pstmtUpdate.setLong(2, hostId);
|
||||
pstmtUpdate.setString(3, "F5BigIp");
|
||||
pstmtUpdate.setString(4, "F5BigIp");
|
||||
pstmtUpdate.setLong(5, 0);
|
||||
pstmtUpdate.setBoolean(6, false);
|
||||
pstmtUpdate.setString(7, "Enabled");
|
||||
pstmtUpdate.setString(8, "Shared");
|
||||
pstmtUpdate.setBoolean(9, false);
|
||||
pstmtUpdate.setBoolean(10, false);
|
||||
pstmtUpdate.setString(11, UUID.randomUUID().toString());
|
||||
pstmtUpdate.executeUpdate();
|
||||
pstmtUpdate.close();
|
||||
}catch (SQLException e) {
|
||||
throw new CloudRuntimeException("Exception while adding F5 load balancer due to", e);
|
||||
} finally {
|
||||
if (pstmtUpdate != null) {
|
||||
try {
|
||||
pstmtUpdate.close();
|
||||
} catch (SQLException e) {
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void addSrxFirewall(Connection conn, long hostId, long physicalNetworkId){
|
||||
// add traffic types
|
||||
PreparedStatement pstmtUpdate = null;
|
||||
try{
|
||||
s_logger.debug("Adding SRX firewall device with host id " + hostId);
|
||||
String insertSrx = "INSERT INTO `cloud`.`external_firewall_devices` (physical_network_id, host_id, provider_name, " +
|
||||
"device_name, capacity, is_dedicated, device_state, allocation_state, uuid) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
|
||||
pstmtUpdate = conn.prepareStatement(insertSrx);
|
||||
pstmtUpdate.setLong(1, physicalNetworkId);
|
||||
pstmtUpdate.setLong(2, hostId);
|
||||
pstmtUpdate.setString(3, "JuniperSRX");
|
||||
pstmtUpdate.setString(4, "JuniperSRX");
|
||||
pstmtUpdate.setLong(5, 0);
|
||||
pstmtUpdate.setBoolean(6, false);
|
||||
pstmtUpdate.setString(7, "Enabled");
|
||||
pstmtUpdate.setString(8, "Shared");
|
||||
pstmtUpdate.setString(9, UUID.randomUUID().toString());
|
||||
pstmtUpdate.executeUpdate();
|
||||
pstmtUpdate.close();
|
||||
}catch (SQLException e) {
|
||||
throw new CloudRuntimeException("Exception while adding F5 load balancer due to", e);
|
||||
} finally {
|
||||
if (pstmtUpdate != null) {
|
||||
try {
|
||||
pstmtUpdate.close();
|
||||
} catch (SQLException e) {
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void addF5ServiceProvider(Connection conn, long physicalNetworkId, long zoneId){
|
||||
PreparedStatement pstmtUpdate = null;
|
||||
try{
|
||||
// add physical network service provider - F5BigIp
|
||||
s_logger.debug("Adding PhysicalNetworkServiceProvider F5BigIp");
|
||||
String insertPNSP = "INSERT INTO `cloud`.`physical_network_service_providers` (`uuid`, `physical_network_id` , `provider_name`, `state` ," +
|
||||
"`destination_physical_network_id`, `vpn_service_provided`, `dhcp_service_provided`, `dns_service_provided`, `gateway_service_provided`," +
|
||||
"`firewall_service_provided`, `source_nat_service_provided`, `load_balance_service_provided`, `static_nat_service_provided`," +
|
||||
"`port_forwarding_service_provided`, `user_data_service_provided`, `security_group_service_provided`) VALUES (?,?,?,?,0,0,0,0,0,0,0,1,0,0,0,0)";
|
||||
|
||||
pstmtUpdate = conn.prepareStatement(insertPNSP);
|
||||
pstmtUpdate.setString(1, UUID.randomUUID().toString());
|
||||
pstmtUpdate.setLong(2, physicalNetworkId);
|
||||
pstmtUpdate.setString(3, "F5BigIp");
|
||||
pstmtUpdate.setString(4, "Enabled");
|
||||
pstmtUpdate.executeUpdate();
|
||||
pstmtUpdate.close();
|
||||
}catch (SQLException e) {
|
||||
throw new CloudRuntimeException("Exception while adding PhysicalNetworkServiceProvider F5BigIp ", e);
|
||||
} finally {
|
||||
if (pstmtUpdate != null) {
|
||||
try {
|
||||
pstmtUpdate.close();
|
||||
} catch (SQLException e) {
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void addSrxServiceProvider(Connection conn, long physicalNetworkId, long zoneId){
|
||||
PreparedStatement pstmtUpdate = null;
|
||||
try{
|
||||
// add physical network service provider - JuniperSRX
|
||||
s_logger.debug("Adding PhysicalNetworkServiceProvider JuniperSRX");
|
||||
String insertPNSP = "INSERT INTO `cloud`.`physical_network_service_providers` (`uuid`, `physical_network_id` , `provider_name`, `state` ," +
|
||||
"`destination_physical_network_id`, `vpn_service_provided`, `dhcp_service_provided`, `dns_service_provided`, `gateway_service_provided`," +
|
||||
"`firewall_service_provided`, `source_nat_service_provided`, `load_balance_service_provided`, `static_nat_service_provided`," +
|
||||
"`port_forwarding_service_provided`, `user_data_service_provided`, `security_group_service_provided`) VALUES (?,?,?,?,0,0,0,0,1,1,1,0,1,1,0,0)";
|
||||
|
||||
pstmtUpdate = conn.prepareStatement(insertPNSP);
|
||||
pstmtUpdate.setString(1, UUID.randomUUID().toString());
|
||||
pstmtUpdate.setLong(2, physicalNetworkId);
|
||||
pstmtUpdate.setString(3, "JuniperSRX");
|
||||
pstmtUpdate.setString(4, "Enabled");
|
||||
pstmtUpdate.executeUpdate();
|
||||
pstmtUpdate.close();
|
||||
}catch (SQLException e) {
|
||||
throw new CloudRuntimeException("Exception while adding PhysicalNetworkServiceProvider JuniperSRX ", e);
|
||||
} finally {
|
||||
if (pstmtUpdate != null) {
|
||||
try {
|
||||
pstmtUpdate.close();
|
||||
} catch (SQLException e) {
|
||||
}
|
||||
}
|
||||
}
|
||||
}
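A side note on the resource handling above: each helper repeats the prepare/execute/close-in-finally pattern, and setupExternalNetworkDevices() only closes its statements on the success path. On Java 7+ the same insert can be expressed with try-with-resources so the statement is closed on every path. A minimal sketch under that assumption (hypothetical method name; the column list is abbreviated, whereas the real upgrade sets the additional *_service_provided columns explicitly):

    // Sketch only: equivalent of addF5ServiceProvider() using try-with-resources.
    // Assumes the omitted *_service_provided columns have defaults, which the real
    // schema may not; the actual upgrade sets them explicitly in its INSERT.
    private void addF5ServiceProviderSketch(Connection conn, long physicalNetworkId) {
        String sql = "INSERT INTO `cloud`.`physical_network_service_providers` "
                + "(`uuid`, `physical_network_id`, `provider_name`, `state`) VALUES (?, ?, ?, ?)";
        try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
            pstmt.setString(1, UUID.randomUUID().toString());
            pstmt.setLong(2, physicalNetworkId);
            pstmt.setString(3, "F5BigIp");
            pstmt.setString(4, "Enabled");
            pstmt.executeUpdate();
        } catch (SQLException e) {
            throw new CloudRuntimeException("Exception while adding PhysicalNetworkServiceProvider F5BigIp", e);
        }
    }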
|
||||
|
||||
@Override
|
||||
|
|
|
|||
|
|
@ -34,7 +34,7 @@ class Services:
|
|||
|
||||
def __init__(self):
|
||||
self.services = {
|
||||
"disk_offering": {
|
||||
"disk_offering":{
|
||||
"displaytext": "Small",
|
||||
"name": "Small",
|
||||
"disksize": 1
|
||||
|
|
@ -64,8 +64,8 @@ class Services:
|
|||
"name": "Tiny Instance",
|
||||
"displaytext": "Tiny Instance",
|
||||
"cpunumber": 1,
|
||||
"cpuspeed": 100, # in MHz
|
||||
"memory": 64, # In MBs
|
||||
"cpuspeed": 100, # in MHz
|
||||
"memory": 64, # In MBs
|
||||
},
|
||||
"security_group": {
|
||||
"name": 'SSH',
|
||||
|
|
@ -107,9 +107,8 @@ class Services:
|
|||
"protocol": 'TCP',
|
||||
"startport": 22,
|
||||
"endport": 22,
|
||||
"cidrlist": '0.0.0.0/0',
|
||||
},
|
||||
"mgmt_server": {
|
||||
"mgmt_server": {
|
||||
"username": "root",
|
||||
"password": "fr3sca",
|
||||
"ipaddress": "192.168.100.21"
|
||||
|
|
@ -118,7 +117,7 @@ class Services:
|
|||
# CentOS 5.3 (64-bit)
|
||||
"sleep": 60,
|
||||
"timeout": 10,
|
||||
"mode": 'basic',
|
||||
"mode":'basic',
|
||||
# Networking mode: Basic or Advanced
|
||||
}
|
||||
|
||||
|
|
@ -801,7 +800,7 @@ class TestDefaultGroupEgressAfterDeploy(cloudstackTestCase):
|
|||
# --- www.l.google.com ping statistics ---
|
||||
# 1 packets transmitted, 1 received, 0% packet loss, time 0ms
|
||||
# rtt min/avg/max/mdev = 25.970/25.970/25.970/0.000 ms
|
||||
self.debug("SSH result: %s" %str(res))
|
||||
self.debug("SSH result: %s" % str(res))
|
||||
except Exception as e:
|
||||
self.fail("SSH Access failed for %s: %s" % \
|
||||
(self.virtual_machine.ipaddress, e)
|
||||
|
|
@ -985,7 +984,7 @@ class TestRevokeEgressRule(cloudstackTestCase):
|
|||
# --- www.l.google.com ping statistics ---
|
||||
# 1 packets transmitted, 1 received, 0% packet loss, time 0ms
|
||||
# rtt min/avg/max/mdev = 25.970/25.970/25.970/0.000 ms
|
||||
self.debug("SSH result: %s" % str(res))
|
||||
self.debug("SSH result: %s" % str(res))
|
||||
except Exception as e:
|
||||
self.fail("SSH Access failed for %s: %s" % \
|
||||
(self.virtual_machine.ipaddress, e)
|
||||
|
|
@ -1026,7 +1025,7 @@ class TestRevokeEgressRule(cloudstackTestCase):
|
|||
|
||||
result = security_group.revokeEgress(
|
||||
self.apiclient,
|
||||
id=ssh_egress_rule["ruleid"]
|
||||
id = ssh_egress_rule["ruleid"]
|
||||
)
|
||||
self.debug("Revoke egress rule result: %s" % result)
|
||||
|
||||
|
|
@ -1319,14 +1318,14 @@ class TestMultipleAccountsEgressRuleNeg(cloudstackTestCase):
|
|||
"Authorizing egress rule for sec group ID: %s for ssh access"
|
||||
% security_group.id)
|
||||
# Authorize to only account not CIDR
|
||||
user_secgrp_list = {self.accountB.account.name: 'default'}
|
||||
user_secgrp_list = {self.accountB.account.name: 'default'}
|
||||
|
||||
egress_rule = security_group.authorizeEgress(
|
||||
self.apiclient,
|
||||
self.services["sg_account"],
|
||||
account=self.accountA.account.name,
|
||||
domainid=self.accountA.account.domainid,
|
||||
user_secgrp_list=user_secgrp_list
|
||||
user_secgrp_list=user_secgrp_list
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
|
|
@ -1422,7 +1421,7 @@ class TestMultipleAccountsEgressRuleNeg(cloudstackTestCase):
|
|||
|
||||
try:
|
||||
self.debug("SSHing into VM type B from VM A")
|
||||
self.debug("VM IP: %s" % self.virtual_machineB.ssh_ip)
|
||||
self.debug("VM IP: %s" % self.virtual_machineB.ssh_ip)
|
||||
res = ssh.execute("ssh %s@%s" % (
|
||||
self.services["virtual_machine"]["username"],
|
||||
self.virtual_machineB.ssh_ip
|
||||
|
|
@ -1592,14 +1591,14 @@ class TestMultipleAccountsEgressRule(cloudstackTestCase):
|
|||
"Authorizing egress rule for sec group ID: %s for ssh access"
|
||||
% security_groupA.id)
|
||||
# Authorize to only account not CIDR
|
||||
user_secgrp_list = {self.accountB.account.name: security_groupB.name}
|
||||
user_secgrp_list = {self.accountB.account.name: security_groupB.name}
|
||||
|
||||
egress_rule = security_groupA.authorizeEgress(
|
||||
self.apiclient,
|
||||
self.services["sg_account"],
|
||||
account=self.accountA.account.name,
|
||||
domainid=self.accountA.account.domainid,
|
||||
user_secgrp_list=user_secgrp_list
|
||||
user_secgrp_list=user_secgrp_list
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
|
|
@ -1716,7 +1715,7 @@ class TestMultipleAccountsEgressRule(cloudstackTestCase):
|
|||
|
||||
try:
|
||||
self.debug("SSHing into VB type B from VM A")
|
||||
self.debug("VM IP: %s" % self.virtual_machineB.ssh_ip)
|
||||
self.debug("VM IP: %s" % self.virtual_machineB.ssh_ip)
|
||||
|
||||
res = ssh.execute("ssh %s@%s" % (
|
||||
self.services["virtual_machine"]["username"],
|
||||
|
|
@ -1945,7 +1944,6 @@ class TestStartStopVMWithEgressRule(cloudstackTestCase):
|
|||
)
|
||||
return
|
||||
|
||||
|
||||
@unittest.skip("Valid bug- ID: CS-12647")
|
||||
class TestInvalidParametersForEgress(cloudstackTestCase):
|
||||
|
||||
|
|
@ -2306,12 +2304,12 @@ class TestEgressAfterHostMaintainance(cloudstackTestCase):
|
|||
)
|
||||
vm = vms[0]
|
||||
|
||||
self.debug("Enabling host maintainance for ID: %s" % vm.hostid)
|
||||
self.debug("Enabling host maintainance for ID: %s" % host.id)
|
||||
cmd = prepareHostForMaintenance.prepareHostForMaintenanceCmd()
|
||||
cmd.id = vm.hostid
|
||||
self.apiclient.prepareHostForMaintenance(cmd)
|
||||
|
||||
self.debug("Canceling host maintainance for ID: %s" % vm.hostid)
|
||||
self.debug("Canceling host maintainance for ID: %s" % host.id)
|
||||
cmd = cancelHostMaintenance.cancelHostMaintenanceCmd()
|
||||
cmd.id = vm.hostid
|
||||
self.apiclient.cancelHostMaintenance(cmd)
|
||||
|
|
|
|||
File diff suppressed because it is too large
|
|
@ -77,7 +77,19 @@ class Services:
|
|||
"publicport": 22,
|
||||
"protocol": 'TCP',
|
||||
},
|
||||
"ostypeid": '9958b10f-9e5d-4ef1-908d-a047372d823b',
|
||||
"templates": {
|
||||
"displaytext": "Public Template",
|
||||
"name": "Public template",
|
||||
"ostypeid": '946b031b-0e10-4f4a-a3fc-d212ae2ea07f',
|
||||
"url": "http://download.cloud.com/releases/2.0.0/UbuntuServer-10-04-64bit.vhd.bz2",
|
||||
"hypervisor": 'XenServer',
|
||||
"format" : 'VHD',
|
||||
"isfeatured": True,
|
||||
"ispublic": True,
|
||||
"isextractable": True,
|
||||
"templatefilter": 'self',
|
||||
},
|
||||
"ostypeid": '946b031b-0e10-4f4a-a3fc-d212ae2ea07f',
|
||||
# Cent OS 5.3 (64 bit)
|
||||
"sleep": 60,
|
||||
"timeout": 100,
|
||||
|
|
@ -127,14 +139,14 @@ class TestHighAvailability(cloudstackTestCase):
|
|||
]
|
||||
return
|
||||
|
||||
# @classmethod
|
||||
# def tearDownClass(cls):
|
||||
# try:
|
||||
# #Cleanup resources used
|
||||
# cleanup_resources(cls.api_client, cls._cleanup)
|
||||
# except Exception as e:
|
||||
# raise Exception("Warning: Exception during cleanup : %s" % e)
|
||||
# return
|
||||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
try:
|
||||
#Cleanup resources used
|
||||
cleanup_resources(cls.api_client, cls._cleanup)
|
||||
except Exception as e:
|
||||
raise Exception("Warning: Exception during cleanup : %s" % e)
|
||||
return
|
||||
|
||||
def setUp(self):
|
||||
self.apiclient = self.testClient.getApiClient()
|
||||
|
|
@ -148,15 +160,15 @@ class TestHighAvailability(cloudstackTestCase):
|
|||
self.cleanup = [self.account]
|
||||
return
|
||||
|
||||
# def tearDown(self):
|
||||
# try:
|
||||
# #Clean up, terminate the created accounts, domains etc
|
||||
# cleanup_resources(self.apiclient, self.cleanup)
|
||||
# self.testClient.close()
|
||||
# except Exception as e:
|
||||
# raise Exception("Warning: Exception during cleanup : %s" % e)
|
||||
# return
|
||||
|
||||
def tearDown(self):
|
||||
try:
|
||||
#Clean up, terminate the created accounts, domains etc
|
||||
cleanup_resources(self.apiclient, self.cleanup)
|
||||
self.testClient.close()
|
||||
except Exception as e:
|
||||
raise Exception("Warning: Exception during cleanup : %s" % e)
|
||||
return
|
||||
@unittest.skip("skipped")
|
||||
def test_01_host_maintenance_mode(self):
|
||||
"""Test host maintenance mode
|
||||
"""
|
||||
|
|
@ -255,7 +267,7 @@ class TestHighAvailability(cloudstackTestCase):
|
|||
))
|
||||
self.debug("Creating PF rule for IP address: %s" %
|
||||
public_ip.ipaddress.ipaddress)
|
||||
nat_rule= NATRule.create(
|
||||
nat_rule = NATRule.create(
|
||||
self.apiclient,
|
||||
virtual_machine,
|
||||
self.services["natrule"],
|
||||
|
|
@ -265,6 +277,15 @@ class TestHighAvailability(cloudstackTestCase):
|
|||
self.debug("Creating LB rule on IP with NAT: %s" %
|
||||
public_ip.ipaddress.ipaddress)
|
||||
|
||||
# Open up firewall port for SSH
|
||||
fw_rule = FireWallRule.create(
|
||||
self.apiclient,
|
||||
ipaddressid=public_ip.ipaddress.id,
|
||||
protocol=self.services["natrule"]["protocol"],
|
||||
cidrlist=['0.0.0.0/0'],
|
||||
startport=self.services["natrule"]["publicport"],
|
||||
endport=self.services["natrule"]["publicport"]
|
||||
)
|
||||
# Create Load Balancer rule on IP already having NAT rule
|
||||
lb_rule = LoadBalancerRule.create(
|
||||
self.apiclient,
|
||||
|
|
@ -318,7 +339,11 @@ class TestHighAvailability(cloudstackTestCase):
|
|||
vm = vms[0]
|
||||
|
||||
self.debug("VM 1 state: %s" % vm.state)
|
||||
if vm.state in ["Stopping", "Stopped", "Running", "Starting"]:
|
||||
if vm.state in ["Stopping",
|
||||
"Stopped",
|
||||
"Running",
|
||||
"Starting",
|
||||
"Migrating"]:
|
||||
if vm.state == "Running":
|
||||
break
|
||||
else:
|
||||
|
|
@ -418,7 +443,53 @@ class TestHighAvailability(cloudstackTestCase):
|
|||
self.debug(
|
||||
"VM state after enabling maintenance on first host: %s" %
|
||||
vm.state)
|
||||
if vm.state in ["Stopping", "Stopped", "Running", "Starting"]:
|
||||
if vm.state in [
|
||||
"Stopping",
|
||||
"Stopped",
|
||||
"Running",
|
||||
"Starting",
|
||||
"Migrating"
|
||||
]:
|
||||
if vm.state == "Running":
|
||||
break
|
||||
else:
|
||||
time.sleep(self.services["sleep"])
|
||||
timeout = timeout - 1
|
||||
else:
|
||||
self.fail(
|
||||
"VM migration from one-host-to-other failed while enabling maintenance"
|
||||
)
|
||||
|
||||
# Poll and check the status of VMs
|
||||
timeout = self.services["timeout"]
|
||||
while True:
|
||||
vms = VirtualMachine.list(
|
||||
self.apiclient,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(vms, list),
|
||||
True,
|
||||
"List VMs should return valid response for deployed VM"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(vms),
|
||||
0,
|
||||
"List VMs should return valid response for deployed VM"
|
||||
)
|
||||
vm = vms[1]
|
||||
self.debug(
|
||||
"VM state after enabling maintenance on first host: %s" %
|
||||
vm.state)
|
||||
if vm.state in [
|
||||
"Stopping",
|
||||
"Stopped",
|
||||
"Running",
|
||||
"Starting",
|
||||
"Migrating"
|
||||
]:
|
||||
if vm.state == "Running":
|
||||
break
|
||||
else:
|
||||
|
|
@ -438,6 +509,7 @@ class TestHighAvailability(cloudstackTestCase):
|
|||
"Running",
|
||||
"Deployed VM should be in Running state"
|
||||
)
|
||||
|
||||
# Spawn an instance on other host
|
||||
virtual_machine_3 = VirtualMachine.create(
|
||||
self.apiclient,
|
||||
|
|
@ -486,6 +558,7 @@ class TestHighAvailability(cloudstackTestCase):
|
|||
cmd.id = second_host
|
||||
self.apiclient.cancelHostMaintenance(cmd)
|
||||
self.debug("Maintenance mode canceled for host: %s" % second_host)
|
||||
|
||||
self.debug("Waiting for SSVMs to come up")
|
||||
wait_for_ssvms(
|
||||
self.apiclient,
|
||||
|
|
@ -596,13 +669,23 @@ class TestHighAvailability(cloudstackTestCase):
|
|||
))
|
||||
self.debug("Creating PF rule for IP address: %s" %
|
||||
public_ip.ipaddress.ipaddress)
|
||||
nat_rule= NATRule.create(
|
||||
nat_rule = NATRule.create(
|
||||
self.apiclient,
|
||||
virtual_machine,
|
||||
self.services["natrule"],
|
||||
ipaddressid=public_ip.ipaddress.id
|
||||
)
|
||||
|
||||
# Open up firewall port for SSH
|
||||
fw_rule = FireWallRule.create(
|
||||
self.apiclient,
|
||||
ipaddressid=public_ip.ipaddress.id,
|
||||
protocol=self.services["natrule"]["protocol"],
|
||||
cidrlist=['0.0.0.0/0'],
|
||||
startport=self.services["natrule"]["publicport"],
|
||||
endport=self.services["natrule"]["publicport"]
|
||||
)
|
||||
|
||||
self.debug("Creating LB rule on IP with NAT: %s" %
|
||||
public_ip.ipaddress.ipaddress)
|
||||
|
||||
|
|
@ -624,7 +707,7 @@ class TestHighAvailability(cloudstackTestCase):
|
|||
self.fail("SSH Access failed for %s: %s" % \
|
||||
(virtual_machine.ipaddress, e)
|
||||
)
|
||||
# Get the Root disk of VM
|
||||
# Get the Root disk of VM
|
||||
volumes = list_volumes(
|
||||
self.apiclient,
|
||||
virtualmachineid=virtual_machine.id,
|
||||
|
|
@ -662,6 +745,7 @@ class TestHighAvailability(cloudstackTestCase):
|
|||
snapshot.id,
|
||||
"Check snapshot id in list resources call"
|
||||
)
|
||||
|
||||
# Generate template from the snapshot
|
||||
self.debug("Generating template from snapshot: %s" % snapshot.name)
|
||||
template = Template.create_from_snapshot(
|
||||
|
|
@ -669,7 +753,6 @@ class TestHighAvailability(cloudstackTestCase):
|
|||
snapshot,
|
||||
self.services["templates"]
|
||||
)
|
||||
self.cleanup.append(template)
|
||||
self.debug("Created template from snapshot: %s" % template.id)
|
||||
|
||||
templates = list_templates(
|
||||
|
|
@ -725,7 +808,11 @@ class TestHighAvailability(cloudstackTestCase):
|
|||
vm = vms[0]
|
||||
|
||||
self.debug("VM 1 state: %s" % vm.state)
|
||||
if vm.state in ["Stopping", "Stopped", "Running", "Starting"]:
|
||||
if vm.state in ["Stopping",
|
||||
"Stopped",
|
||||
"Running",
|
||||
"Starting",
|
||||
"Migrating"]:
|
||||
if vm.state == "Running":
|
||||
break
|
||||
else:
|
||||
|
|
@ -789,7 +876,7 @@ class TestHighAvailability(cloudstackTestCase):
|
|||
self.apiclient.cancelHostMaintenance(cmd)
|
||||
self.debug("Maintenance mode canceled for host: %s" % first_host)
|
||||
|
||||
# Get the Root disk of VM
|
||||
# Get the Root disk of VM
|
||||
volumes = list_volumes(
|
||||
self.apiclient,
|
||||
virtualmachineid=virtual_machine_2.id,
|
||||
|
|
@ -827,6 +914,7 @@ class TestHighAvailability(cloudstackTestCase):
|
|||
snapshot.id,
|
||||
"Check snapshot id in list resources call"
|
||||
)
|
||||
|
||||
# Generate template from the snapshot
|
||||
self.debug("Generating template from snapshot: %s" % snapshot.name)
|
||||
template = Template.create_from_snapshot(
|
||||
|
|
@ -834,7 +922,6 @@ class TestHighAvailability(cloudstackTestCase):
|
|||
snapshot,
|
||||
self.services["templates"]
|
||||
)
|
||||
self.cleanup.append(template)
|
||||
self.debug("Created template from snapshot: %s" % template.id)
|
||||
|
||||
templates = list_templates(
|
||||
|
|
@ -892,7 +979,49 @@ class TestHighAvailability(cloudstackTestCase):
|
|||
self.debug(
|
||||
"VM state after enabling maintenance on first host: %s" %
|
||||
vm.state)
|
||||
if vm.state in ["Stopping", "Stopped", "Running", "Starting"]:
|
||||
if vm.state in ["Stopping",
|
||||
"Stopped",
|
||||
"Running",
|
||||
"Starting",
|
||||
"Migrating"]:
|
||||
if vm.state == "Running":
|
||||
break
|
||||
else:
|
||||
time.sleep(self.services["sleep"])
|
||||
timeout = timeout - 1
|
||||
else:
|
||||
self.fail(
|
||||
"VM migration from one-host-to-other failed while enabling maintenance"
|
||||
)
|
||||
|
||||
# Poll and check the status of VMs
|
||||
timeout = self.services["timeout"]
|
||||
while True:
|
||||
vms = VirtualMachine.list(
|
||||
self.apiclient,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(vms, list),
|
||||
True,
|
||||
"List VMs should return valid response for deployed VM"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(vms),
|
||||
0,
|
||||
"List VMs should return valid response for deployed VM"
|
||||
)
|
||||
vm = vms[1]
|
||||
self.debug(
|
||||
"VM state after enabling maintenance on first host: %s" %
|
||||
vm.state)
|
||||
if vm.state in ["Stopping",
|
||||
"Stopped",
|
||||
"Running",
|
||||
"Starting",
|
||||
"Migrating"]:
|
||||
if vm.state == "Running":
|
||||
break
|
||||
else:
|
||||
|
|
@ -912,6 +1041,7 @@ class TestHighAvailability(cloudstackTestCase):
|
|||
"Running",
|
||||
"Deployed VM should be in Running state"
|
||||
)
|
||||
|
||||
# Spawn an instance on other host
|
||||
virtual_machine_3 = VirtualMachine.create(
|
||||
self.apiclient,
|
||||
|
|
@ -945,21 +1075,12 @@ class TestHighAvailability(cloudstackTestCase):
|
|||
"Deployed VM should be in Running state"
|
||||
)
|
||||
|
||||
# Should be able to SSH VM
|
||||
try:
|
||||
self.debug("SSH into VM: %s" % virtual_machine.id)
|
||||
ssh = virtual_machine.get_ssh_client(
|
||||
ipaddress=public_ip.ipaddress.ipaddress)
|
||||
except Exception as e:
|
||||
self.fail("SSH Access failed for %s: %s" % \
|
||||
(virtual_machine.ipaddress, e)
|
||||
)
|
||||
|
||||
self.debug("Canceling host maintenance for ID: %s" % second_host)
|
||||
cmd = cancelHostMaintenance.cancelHostMaintenanceCmd()
|
||||
cmd.id = second_host
|
||||
self.apiclient.cancelHostMaintenance(cmd)
|
||||
self.debug("Maintenance mode canceled for host: %s" % second_host)
|
||||
|
||||
self.debug("Waiting for SSVMs to come up")
|
||||
wait_for_ssvms(
|
||||
self.apiclient,
|
||||
|
|
|
|||
|
|
@ -44,7 +44,7 @@ class Services:
|
|||
"name": "Tiny Instance",
|
||||
"displaytext": "Tiny Instance",
|
||||
"cpunumber": 1,
|
||||
"cpuspeed": 100, # in MHz
|
||||
"cpuspeed": 100, # in MHz
|
||||
"memory": 64, # In MBs
|
||||
},
|
||||
"network_offering": {
|
||||
|
|
@ -54,16 +54,16 @@ class Services:
|
|||
"supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Firewall,Lb,UserData,StaticNat',
|
||||
"traffictype": 'GUEST',
|
||||
"availability": 'Optional',
|
||||
"serviceProviderList": {
|
||||
"serviceProviderList" : {
|
||||
"Dhcp": 'VirtualRouter',
|
||||
"Dns": 'VirtualRouter',
|
||||
"SourceNat": 'VirtualRouter',
|
||||
"PortForwarding": 'VirtualRouter',
|
||||
"Vpn": 'VirtualRouter',
|
||||
"Firewall": 'VirtualRouter',
|
||||
"Lb": 'VirtualRouter',
|
||||
"UserData": 'VirtualRouter',
|
||||
"StaticNat": 'VirtualRouter',
|
||||
"Vpn": 'VirtualRouter',
|
||||
"Firewall": 'VirtualRouter',
|
||||
"Lb": 'VirtualRouter',
|
||||
"UserData": 'VirtualRouter',
|
||||
"StaticNat": 'VirtualRouter',
|
||||
},
|
||||
},
|
||||
"network_offering_netscaler": {
|
||||
|
|
@ -73,7 +73,7 @@ class Services:
|
|||
"supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Firewall,Lb,UserData,StaticNat',
|
||||
"traffictype": 'GUEST',
|
||||
"availability": 'Optional',
|
||||
"serviceProviderList": {
|
||||
"serviceProviderList" : {
|
||||
"Dhcp": 'VirtualRouter',
|
||||
"Dns": 'VirtualRouter',
|
||||
"SourceNat": 'VirtualRouter',
|
||||
|
|
@ -115,7 +115,7 @@ class Services:
|
|||
"publicport": 66,
|
||||
"protocol": "TCP"
|
||||
},
|
||||
"fw_rule": {
|
||||
"fw_rule":{
|
||||
"startport": 1,
|
||||
"endport": 6000,
|
||||
"cidr": '55.55.0.0/11',
|
||||
|
|
@ -137,7 +137,7 @@ class Services:
|
|||
# Cent OS 5.3 (64 bit)
|
||||
"sleep": 60,
|
||||
"timeout": 10,
|
||||
"mode": 'advanced'
|
||||
"mode":'advanced'
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -240,7 +240,7 @@ class TestNOVirtualRouter(cloudstackTestCase):
|
|||
|
||||
self.debug("Created n/w offering with ID: %s" %
|
||||
self.network_offering.id)
|
||||
# Enable Network offering
|
||||
# Enable Network offering
|
||||
self.network_offering.update(self.apiclient, state='Enabled')
|
||||
|
||||
# Creating network using the network offering created
|
||||
|
|
@ -252,7 +252,7 @@ class TestNOVirtualRouter(cloudstackTestCase):
|
|||
accountid=self.account.account.name,
|
||||
domainid=self.account.account.domainid,
|
||||
networkofferingid=self.network_offering.id,
|
||||
zoneid=self.zone.id
|
||||
zoneid=self.zone.id
|
||||
)
|
||||
self.debug("Created network with ID: %s" % self.network.id)
|
||||
|
||||
|
|
@ -268,6 +268,7 @@ class TestNOVirtualRouter(cloudstackTestCase):
|
|||
networkids=[str(self.network.id)]
|
||||
)
|
||||
self.debug("Deployed VM in network: %s" % self.network.id)
|
||||
|
||||
src_nat_list = PublicIPAddress.list(
|
||||
self.apiclient,
|
||||
associatednetworkid=self.network.id,
|
||||
|
|
@ -286,7 +287,9 @@ class TestNOVirtualRouter(cloudstackTestCase):
|
|||
0,
|
||||
"Length of response from listPublicIp should not be 0"
|
||||
)
|
||||
|
||||
src_nat = src_nat_list[0]
|
||||
|
||||
self.debug("Trying to create LB rule on source NAT IP: %s" %
|
||||
src_nat.ipaddress)
|
||||
# Create Load Balancer rule with source NAT
|
||||
|
|
@ -326,7 +329,7 @@ class TestNOVirtualRouter(cloudstackTestCase):
|
|||
ip_with_nat_rule.ipaddress.ipaddress)
|
||||
NATRule.create(
|
||||
self.apiclient,
|
||||
virtual_machine,
|
||||
virtual_machine,
|
||||
self.services["natrule"],
|
||||
ipaddressid=ip_with_nat_rule.ipaddress.id
|
||||
)
|
||||
|
|
@ -478,7 +481,7 @@ class TestNOVirtualRouter(cloudstackTestCase):
|
|||
|
||||
self.debug("Created n/w offering with ID: %s" %
|
||||
self.network_offering.id)
|
||||
# Enable Network offering
|
||||
# Enable Network offering
|
||||
self.network_offering.update(self.apiclient, state='Enabled')
|
||||
|
||||
# Creating network using the network offering created
|
||||
|
|
@ -490,7 +493,7 @@ class TestNOVirtualRouter(cloudstackTestCase):
|
|||
accountid=self.account.account.name,
|
||||
domainid=self.account.account.domainid,
|
||||
networkofferingid=self.network_offering.id,
|
||||
zoneid=self.zone.id
|
||||
zoneid=self.zone.id
|
||||
)
|
||||
self.debug("Created network with ID: %s" % self.network.id)
|
||||
|
||||
|
|
@ -506,6 +509,7 @@ class TestNOVirtualRouter(cloudstackTestCase):
|
|||
networkids=[str(self.network.id)]
|
||||
)
|
||||
self.debug("Deployed VM in network: %s" % self.network.id)
|
||||
|
||||
src_nat_list = PublicIPAddress.list(
|
||||
self.apiclient,
|
||||
associatednetworkid=self.network.id,
|
||||
|
|
@ -694,7 +698,7 @@ class TestNOVirtualRouter(cloudstackTestCase):
|
|||
vpns = Vpn.list(
|
||||
self.apiclient,
|
||||
publicipid=src_nat.id,
|
||||
listall=True,
|
||||
listall=True,
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
|
|
@ -789,13 +793,14 @@ class TestNOWithNetscaler(cloudstackTestCase):
|
|||
# 5. On an ipaddress that has Lb rules , we should NOT allow firewall
|
||||
# rules to be programmed.
|
||||
# 6. On an ipaddress that has Lb rules , we should NOT allow PF rules
|
||||
# to be programmed.
|
||||
# to be programmed.
|
||||
# 7. We should be allowed to program multiple PF rules on the same Ip
|
||||
# address on different public ports.
|
||||
# 8. We should be allowed to program multiple LB rules on the same Ip
|
||||
# address for different public port ranges.
|
||||
# address for different public port ranges.
|
||||
# 9. On source NAT ipaddress, we should NOT be allowed to Enable VPN.
|
||||
|
||||
|
||||
# Create a network offering with all virtual router services enabled
|
||||
self.debug(
|
||||
"Creating n/w offering with all services in VR & conserve mode:ON"
|
||||
|
|
@ -837,6 +842,7 @@ class TestNOWithNetscaler(cloudstackTestCase):
|
|||
networkids=[str(self.network.id)]
|
||||
)
|
||||
self.debug("Deployed VM in network: %s" % self.network.id)
|
||||
|
||||
src_nat_list = PublicIPAddress.list(
|
||||
self.apiclient,
|
||||
associatednetworkid=self.network.id,
|
||||
|
|
@ -882,7 +888,7 @@ class TestNOWithNetscaler(cloudstackTestCase):
|
|||
)
|
||||
self.debug("Creating firewall rule on source NAT: %s" %
|
||||
src_nat.ipaddress)
|
||||
#Create Firewall rule on source NAT
|
||||
#Create Firewall rule on source NAT
|
||||
fw_rule = FireWallRule.create(
|
||||
self.apiclient,
|
||||
ipaddressid=src_nat.id,
|
||||
|
|
@ -1056,13 +1062,14 @@ class TestNOWithNetscaler(cloudstackTestCase):
|
|||
# 5. On an ipaddress that has Lb rules , we should NOT allow firewall
|
||||
# rules to be programmed.
|
||||
# 6. On an ipaddress that has Lb rules , we should NOT allow PF rules
|
||||
# to be programmed.
|
||||
# to be programmed.
|
||||
# 7. We should be allowed to program multiple PF rules on the same Ip
|
||||
# address on different public ports.
|
||||
# 8. We should be allowed to program multiple LB rules on the same Ip
|
||||
# address for different public port ranges.
|
||||
# address for different public port ranges.
|
||||
# 9. On source NAT ipaddress, we should be allowed to Enable VPN.
|
||||
|
||||
|
||||
# Create a network offering with all virtual router services enabled
|
||||
self.debug(
|
||||
"Creating n/w offering with all services in VR & conserve mode:ON"
|
||||
|
|
@ -1104,6 +1111,7 @@ class TestNOWithNetscaler(cloudstackTestCase):
|
|||
networkids=[str(self.network.id)]
|
||||
)
|
||||
self.debug("Deployed VM in network: %s" % self.network.id)
|
||||
|
||||
src_nat_list = PublicIPAddress.list(
|
||||
self.apiclient,
|
||||
associatednetworkid=self.network.id,
|
||||
|
|
@ -1452,6 +1460,7 @@ class TestNetworkUpgrade(cloudstackTestCase):
|
|||
networkids=[str(self.network.id)]
|
||||
)
|
||||
self.debug("Deployed VM in network: %s" % self.network.id)
|
||||
|
||||
src_nat_list = PublicIPAddress.list(
|
||||
self.apiclient,
|
||||
associatednetworkid=self.network.id,
|
||||
|
|
@ -1535,6 +1544,7 @@ class TestNetworkUpgrade(cloudstackTestCase):
|
|||
)
|
||||
self.cleanup.append(ns_lb_offering)
|
||||
ns_lb_offering.update(self.apiclient, state='Enabled')
|
||||
|
||||
#Stop all the VMs associated with network to update cidr
|
||||
self.debug("Stopping the VM: %s" % virtual_machine.name)
|
||||
virtual_machine.stop(self.apiclient)
|
||||
|
|
@ -1549,6 +1559,7 @@ class TestNetworkUpgrade(cloudstackTestCase):
|
|||
)
|
||||
|
||||
self.debug("Network upgrade failed!")
|
||||
|
||||
self.debug("Deleting LB Rule: %s" % lb_rule.id)
|
||||
lb_rule.delete(self.apiclient)
|
||||
self.debug("LB rule deleted")
|
||||
|
|
@ -1646,6 +1657,7 @@ class TestNetworkUpgrade(cloudstackTestCase):
|
|||
networkids=[str(self.network.id)]
|
||||
)
|
||||
self.debug("Deployed VM in network: %s" % self.network.id)
|
||||
|
||||
src_nat_list = PublicIPAddress.list(
|
||||
self.apiclient,
|
||||
associatednetworkid=self.network.id,
|
||||
|
|
@ -1729,6 +1741,7 @@ class TestNetworkUpgrade(cloudstackTestCase):
|
|||
)
|
||||
self.cleanup.append(ns_lb_offering)
|
||||
ns_lb_offering.update(self.apiclient, state='Enabled')
|
||||
|
||||
#Stop all the VMs associated with network to update cidr
|
||||
self.debug("Stopping the VM: %s" % virtual_machine.name)
|
||||
virtual_machine.stop(self.apiclient)
|
||||
|
|
@ -1743,6 +1756,7 @@ class TestNetworkUpgrade(cloudstackTestCase):
|
|||
)
|
||||
|
||||
self.debug("Network upgrade failed!")
|
||||
|
||||
self.debug("Deleting LB Rule: %s" % lb_rule.id)
|
||||
lb_rule.delete(self.apiclient)
|
||||
self.debug("LB rule deleted")
|
||||
|
|
|
|||
|
|
@ -20,6 +20,7 @@ from marvin.cloudstackAPI import *
|
|||
from integration.lib.utils import *
|
||||
from integration.lib.base import *
|
||||
from integration.lib.common import *
|
||||
from marvin.remoteSSHClient import remoteSSHClient
|
||||
import datetime
|
||||
|
||||
class Services:
|
||||
|
|
@ -1311,4 +1312,4 @@ class TestSecurityGroup(cloudstackTestCase):
|
|||
domainid=self.account.account.domainid,
|
||||
securitygroupids=[security_group.id],
|
||||
)
|
||||
return
|
||||
return
|
||||
|
|
|
|||
|
|
@ -1806,4 +1806,4 @@ class TestProjectSuspendActivate(cloudstackTestCase):
|
|||
'Running',
|
||||
"VM should be in Running state after project activation"
|
||||
)
|
||||
return
|
||||
return
|
||||
|
|
|
|||
|
|
@ -15,6 +15,7 @@
|
|||
"""
|
||||
#Import Local Modules
|
||||
import marvin
|
||||
from marvin.cloudstackTestCase import *
|
||||
from marvin.cloudstackAPI import *
|
||||
from integration.lib.utils import *
|
||||
from integration.lib.base import *
|
||||
|
|
|
|||
|
|
@ -17,10 +17,10 @@
|
|||
import marvin
|
||||
from marvin.cloudstackTestCase import *
|
||||
from marvin.cloudstackAPI import *
|
||||
from marvin.remoteSSHClient import remoteSSHClient
|
||||
from integration.lib.utils import *
|
||||
from integration.lib.base import *
|
||||
from integration.lib.common import *
|
||||
from marvin.remoteSSHClient import remoteSSHClient
|
||||
|
||||
#Import System modules
|
||||
import time
|
||||
|
|
@ -219,7 +219,7 @@ class TestRouterServices(cloudstackTestCase):
|
|||
True,
|
||||
"Check for list networks response return valid data"
|
||||
)
|
||||
|
||||
|
||||
self.assertNotEqual(
|
||||
len(networks),
|
||||
0,
|
||||
|
|
@ -228,7 +228,7 @@ class TestRouterServices(cloudstackTestCase):
|
|||
for network in networks:
|
||||
self.assertIn(
|
||||
network.state,
|
||||
['Implemented','Allocated'],
|
||||
['Implemented', 'Allocated'],
|
||||
"Check list network response for network state"
|
||||
)
|
||||
self.debug("Network ID: %s & Network state: %s" % (
|
||||
|
|
@ -371,7 +371,7 @@ class TestRouterServices(cloudstackTestCase):
|
|||
for network in networks:
|
||||
self.assertIn(
|
||||
network.state,
|
||||
['Implemented','Allocated'],
|
||||
['Implemented', 'Allocated'],
|
||||
"Check list network response for network state"
|
||||
)
|
||||
self.debug("Network ID: %s & Network state: %s" % (
|
||||
|
|
@ -486,7 +486,7 @@ class TestRouterServices(cloudstackTestCase):
|
|||
serviceofferingid=self.service_offering.id
|
||||
)
|
||||
self.debug("Deployed a VM with ID: %s" % vm.id)
|
||||
|
||||
|
||||
virtual_machines = list_virtual_machines(
|
||||
self.apiclient,
|
||||
id=vm.id,
|
||||
|
|
@ -499,7 +499,7 @@ class TestRouterServices(cloudstackTestCase):
|
|||
True,
|
||||
"Check for list virtual machines response return valid data"
|
||||
)
|
||||
|
||||
|
||||
self.assertNotEqual(
|
||||
len(virtual_machines),
|
||||
0,
|
||||
|
|
@ -524,7 +524,7 @@ class TestRouterServices(cloudstackTestCase):
|
|||
True,
|
||||
"Check for list routers response return valid data"
|
||||
)
|
||||
|
||||
|
||||
self.assertNotEqual(
|
||||
len(routers),
|
||||
0,
|
||||
|
|
@ -556,7 +556,7 @@ class TestRouterServices(cloudstackTestCase):
|
|||
True,
|
||||
"Check for list VMs response return valid data"
|
||||
)
|
||||
|
||||
|
||||
self.assertNotEqual(
|
||||
len(virtual_machines),
|
||||
0,
|
||||
|
|
@ -678,7 +678,7 @@ class TestRouterStopCreatePF(cloudstackTestCase):
|
|||
router = routers[0]
|
||||
|
||||
self.debug("Stopping router ID: %s" % router.id)
|
||||
|
||||
|
||||
#Stop the router
|
||||
cmd = stopRouter.stopRouterCmd()
|
||||
cmd.id = router.id
|
||||
|
|
@ -715,6 +715,16 @@ class TestRouterStopCreatePF(cloudstackTestCase):
|
|||
)
|
||||
public_ip = public_ips[0]
|
||||
|
||||
# Open up firewall port for SSH
|
||||
fw_rule = FireWallRule.create(
|
||||
self.apiclient,
|
||||
ipaddressid=public_ip.id,
|
||||
protocol=self.services["natrule"]["protocol"],
|
||||
cidrlist=['0.0.0.0/0'],
|
||||
startport=self.services["natrule"]["publicport"],
|
||||
endport=self.services["natrule"]["publicport"]
|
||||
)
|
||||
|
||||
self.debug("Creating NAT rule for VM ID: %s" % self.vm_1.id)
|
||||
#Create NAT rule
|
||||
nat_rule = NATRule.create(
|
||||
|
|
@ -766,7 +776,7 @@ class TestRouterStopCreatePF(cloudstackTestCase):
|
|||
try:
|
||||
|
||||
self.debug("SSH into VM with ID: %s" % nat_rule.ipaddress)
|
||||
|
||||
|
||||
self.vm_1.ssh_port = nat_rule.publicport
|
||||
self.vm_1.get_ssh_client(nat_rule.ipaddress)
|
||||
except Exception as e:
|
||||
|
|
@ -864,7 +874,7 @@ class TestRouterStopCreateLB(cloudstackTestCase):
|
|||
True,
|
||||
"Check for list routers response return valid data"
|
||||
)
|
||||
|
||||
|
||||
self.assertNotEqual(
|
||||
len(routers),
|
||||
0,
|
||||
|
|
@ -908,6 +918,16 @@ class TestRouterStopCreateLB(cloudstackTestCase):
|
|||
"Check for list public IPs response return valid data"
|
||||
)
|
||||
public_ip = public_ips[0]
|
||||
|
||||
# Open up firewall port for SSH
|
||||
fw_rule = FireWallRule.create(
|
||||
self.apiclient,
|
||||
ipaddressid=public_ip.id,
|
||||
protocol=self.services["lbrule"]["protocol"],
|
||||
cidrlist=['0.0.0.0/0'],
|
||||
startport=self.services["lbrule"]["publicport"],
|
||||
endport=self.services["lbrule"]["publicport"]
|
||||
)
|
||||
self.debug("Creating LB rule for public IP: %s" % public_ip.id)
|
||||
#Create Load Balancer rule and assign VMs to rule
|
||||
lb_rule = LoadBalancerRule.create(
|
||||
|
|
@ -1071,7 +1091,7 @@ class TestRouterStopCreateFW(cloudstackTestCase):
|
|||
)
|
||||
|
||||
router = routers[0]
|
||||
|
||||
|
||||
self.debug("Stopping the router: %s" % router.id)
|
||||
#Stop the router
|
||||
cmd = stopRouter.stopRouterCmd()
|
||||
|
|
@ -1135,7 +1155,7 @@ class TestRouterStopCreateFW(cloudstackTestCase):
|
|||
True,
|
||||
"Check for list routers response return valid data"
|
||||
)
|
||||
|
||||
|
||||
router = routers[0]
|
||||
|
||||
self.assertEqual(
|
||||
|
|
@ -1153,7 +1173,7 @@ class TestRouterStopCreateFW(cloudstackTestCase):
|
|||
True,
|
||||
"Check for list FW rules response return valid data"
|
||||
)
|
||||
|
||||
|
||||
self.assertEqual(
|
||||
fw_rules[0].state,
|
||||
'Active',
|
||||
|
|
|
|||
|
|
@ -18,10 +18,10 @@
|
|||
import marvin
|
||||
from marvin.cloudstackTestCase import *
|
||||
from marvin.cloudstackAPI import *
|
||||
from marvin.remoteSSHClient import remoteSSHClient
|
||||
from integration.lib.utils import *
|
||||
from integration.lib.base import *
|
||||
from integration.lib.common import *
|
||||
from marvin.remoteSSHClient import remoteSSHClient
|
||||
|
||||
#Import System modules
|
||||
import time
|
||||
|
|
@ -1625,4 +1625,4 @@ class TestIngressRule(cloudstackTestCase):
|
|||
self.fail("SSH access failed for ingress rule ID: %s" \
|
||||
% ingress_rule["id"]
|
||||
)
|
||||
return
|
||||
return
|
||||
|
|
|
|||
|
|
@ -860,7 +860,7 @@ class TestSnapshotDetachedDisk(cloudstackTestCase):
|
|||
self.services["sub_lvl_dir2"],
|
||||
self.services["random_data"]
|
||||
),
|
||||
"sync",
|
||||
"sync",
|
||||
]
|
||||
for c in cmds:
|
||||
self.debug(ssh_client.execute(c))
|
||||
|
|
|
|||
|
|
@ -20,6 +20,7 @@ from marvin.cloudstackAPI import *
|
|||
from integration.lib.utils import *
|
||||
from integration.lib.base import *
|
||||
from integration.lib.common import *
|
||||
from marvin.remoteSSHClient import remoteSSHClient
|
||||
import urllib
|
||||
from random import random
|
||||
#Import System modules
|
||||
|
|
|
|||
|
|
@ -743,7 +743,7 @@ class TestAttachVolumeISO(cloudstackTestCase):
|
|||
iso.id,
|
||||
self.account.account.name
|
||||
))
|
||||
self.cleanup.append(iso)
|
||||
|
||||
try:
|
||||
self.debug("Downloading ISO with ID: %s" % iso.id)
|
||||
iso.download(self.apiclient)
|
||||
|
|
|
|||
File diff suppressed because it is too large
|
|
@ -9,7 +9,7 @@
|
|||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
"""Common functions
|
||||
"""
|
||||
|
|
@ -25,6 +25,7 @@ from base import *
|
|||
#Import System modules
|
||||
import time
|
||||
|
||||
|
||||
def get_domain(apiclient, services=None):
|
||||
"Returns a default domain"
|
||||
|
||||
|
|
@ -32,13 +33,14 @@ def get_domain(apiclient, services=None):
|
|||
if services:
|
||||
if "domainid" in services:
|
||||
cmd.id = services["domainid"]
|
||||
|
||||
|
||||
domains = apiclient.listDomains(cmd)
|
||||
|
||||
|
||||
if isinstance(domains, list):
|
||||
return domains[0]
|
||||
else:
|
||||
raise Exception("Failed to find specified domain.")
|
||||
raise Exception("Failed to find specified domain.")
|
||||
|
||||
|
||||
def get_zone(apiclient, services=None):
|
||||
"Returns a default zone"
|
||||
|
|
@ -47,13 +49,14 @@ def get_zone(apiclient, services=None):
|
|||
if services:
|
||||
if "zoneid" in services:
|
||||
cmd.id = services["zoneid"]
|
||||
|
||||
|
||||
zones = apiclient.listZones(cmd)
|
||||
|
||||
|
||||
if isinstance(zones, list):
|
||||
return zones[0]
|
||||
else:
|
||||
raise Exception("Failed to find specified zone.")
|
||||
raise Exception("Failed to find specified zone.")
|
||||
|
||||
|
||||
def get_pod(apiclient, zoneid, services=None):
|
||||
"Returns a default pod for specified zone"
|
||||
|
|
@ -64,13 +67,14 @@ def get_pod(apiclient, zoneid, services=None):
|
|||
if services:
|
||||
if "podid" in services:
|
||||
cmd.id = services["podid"]
|
||||
|
||||
|
||||
pods = apiclient.listPods(cmd)
|
||||
|
||||
|
||||
if isinstance(pods, list):
|
||||
return pods[0]
|
||||
else:
|
||||
raise Exception("Exception: Failed to find specified pod.")
|
||||
raise Exception("Exception: Failed to find specified pod.")
|
||||
|
||||
|
||||
def get_template(apiclient, zoneid, ostypeid=12, services=None):
|
||||
"Returns a template"
|
||||
|
|
@ -88,11 +92,12 @@ def get_template(apiclient, zoneid, ostypeid=12, services=None):
|
|||
for template in list_templates:
|
||||
if template.ostypeid == ostypeid:
|
||||
return template
|
||||
|
||||
|
||||
raise Exception("Exception: Failed to find template with OSTypeID: %s" %
|
||||
ostypeid)
|
||||
ostypeid)
|
||||
return
|
||||
|
||||
|
||||
def download_systemplates_sec_storage(server, services):
|
||||
"""Download System templates on sec storage"""
|
||||
|
||||
|
|
@ -104,7 +109,7 @@ def download_systemplates_sec_storage(server, services):
|
|||
server["username"],
|
||||
server["password"]
|
||||
)
|
||||
except Exception as e:
|
||||
except Exception:
|
||||
raise Exception("SSH access failted for server with IP address: %s" %
|
||||
server["ipaddess"])
|
||||
# Mount Secondary Storage on Management Server
|
||||
|
|
@ -136,6 +141,7 @@ def download_systemplates_sec_storage(server, services):
|
|||
raise Exception("Failed to download System Templates on Sec Storage")
|
||||
return
|
||||
|
||||
|
||||
def wait_for_ssvms(apiclient, zoneid, podid, interval=60):
|
||||
"""After setup wait for SSVMs to come Up"""
|
||||
|
||||
|
|
@ -179,18 +185,20 @@ def wait_for_ssvms(apiclient, zoneid, podid, interval=60):
|
|||
break
|
||||
return
|
||||
|
||||
def download_builtin_templates(apiclient, zoneid, hypervisor, host, linklocalip, interval=60):
|
||||
|
||||
def download_builtin_templates(apiclient, zoneid, hypervisor, host,
|
||||
linklocalip, interval=60):
|
||||
"""After setup wait till builtin templates are downloaded"""
|
||||
|
||||
|
||||
# Change IPTABLES Rules
|
||||
result = get_process_status(
|
||||
host["ipaddress"],
|
||||
host["port"],
|
||||
host["username"],
|
||||
host["password"],
|
||||
linklocalip,
|
||||
"iptables -P INPUT ACCEPT"
|
||||
)
|
||||
get_process_status(
|
||||
host["ipaddress"],
|
||||
host["port"],
|
||||
host["username"],
|
||||
host["password"],
|
||||
linklocalip,
|
||||
"iptables -P INPUT ACCEPT"
|
||||
)
|
||||
time.sleep(interval)
|
||||
# Find the BUILTIN Templates for given Zone, Hypervisor
|
||||
list_template_response = list_templates(
|
||||
|
|
@ -199,10 +207,10 @@ def download_builtin_templates(apiclient, zoneid, hypervisor, host, linklocalip,
|
|||
zoneid=zoneid,
|
||||
templatefilter='self'
|
||||
)
|
||||
|
||||
|
||||
if not isinstance(list_template_response, list):
|
||||
raise Exception("Failed to download BUILTIN templates")
|
||||
|
||||
|
||||
# Ensure all BUILTIN templates are downloaded
|
||||
templateid = None
|
||||
for template in list_template_response:
|
||||
|
|
@ -223,20 +231,21 @@ def download_builtin_templates(apiclient, zoneid, hypervisor, host, linklocalip,
|
|||
# If template is ready,
|
||||
# template.status = Download Complete
|
||||
# Downloading - x% Downloaded
|
||||
# Error - Any other string
|
||||
# Error - Any other string
|
||||
if template.status == 'Download Complete':
|
||||
break
|
||||
|
||||
|
||||
elif 'Downloaded' in template.status:
|
||||
time.sleep(interval)
|
||||
|
||||
elif 'Installing' not in template.status:
|
||||
raise Exception("ErrorInDownload")
|
||||
|
||||
|
||||
return
|
||||
|
||||
def update_resource_limit(apiclient, resourcetype, account=None, domainid=None,
|
||||
max=None, projectid=None):
|
||||
|
||||
def update_resource_limit(apiclient, resourcetype, account=None,
|
||||
domainid=None, max=None, projectid=None):
|
||||
"""Updates the resource limit to 'max' for given account"""
|
||||
|
||||
cmd = updateResourceLimit.updateResourceLimitCmd()
|
||||
|
|
@ -252,6 +261,7 @@ def update_resource_limit(apiclient, resourcetype, account=None, domainid=None,
|
|||
apiclient.updateResourceLimit(cmd)
|
||||
return
|
||||
|
||||
|
||||
def list_routers(apiclient, **kwargs):
|
||||
"""List all Routers matching criteria"""
|
||||
|
||||
|
|
@ -259,6 +269,7 @@ def list_routers(apiclient, **kwargs):
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listRouters(cmd))

def list_zones(apiclient, **kwargs):
"""List all Zones matching criteria"""

@ -266,6 +277,7 @@ def list_zones(apiclient, **kwargs):
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listZones(cmd))

def list_networks(apiclient, **kwargs):
"""List all Networks matching criteria"""

@ -273,6 +285,7 @@ def list_networks(apiclient, **kwargs):
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listNetworks(cmd))

def list_clusters(apiclient, **kwargs):
"""List all Clusters matching criteria"""

@ -280,6 +293,7 @@ def list_clusters(apiclient, **kwargs):
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listClusters(cmd))

def list_ssvms(apiclient, **kwargs):
"""List all SSVMs matching criteria"""

@ -287,6 +301,7 @@ def list_ssvms(apiclient, **kwargs):
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listSystemVms(cmd))

def list_storage_pools(apiclient, **kwargs):
"""List all storage pools matching criteria"""

@ -294,6 +309,7 @@ def list_storage_pools(apiclient, **kwargs):
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listStoragePools(cmd))

def list_virtual_machines(apiclient, **kwargs):
"""List all VMs matching criteria"""

@ -301,6 +317,7 @@ def list_virtual_machines(apiclient, **kwargs):
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listVirtualMachines(cmd))

def list_hosts(apiclient, **kwargs):
"""List all Hosts matching criteria"""

@ -308,6 +325,7 @@ def list_hosts(apiclient, **kwargs):
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listHosts(cmd))

def list_configurations(apiclient, **kwargs):
"""List configuration with specified name"""

@ -315,6 +333,7 @@ def list_configurations(apiclient, **kwargs):
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listConfigurations(cmd))

def list_publicIP(apiclient, **kwargs):
"""List all Public IPs matching criteria"""

@ -322,6 +341,7 @@ def list_publicIP(apiclient, **kwargs):
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listPublicIpAddresses(cmd))

def list_nat_rules(apiclient, **kwargs):
"""List all NAT rules matching criteria"""

@ -329,6 +349,7 @@ def list_nat_rules(apiclient, **kwargs):
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listPortForwardingRules(cmd))

def list_lb_rules(apiclient, **kwargs):
"""List all Load balancing rules matching criteria"""

@ -336,6 +357,7 @@ def list_lb_rules(apiclient, **kwargs):
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listLoadBalancerRules(cmd))

def list_lb_instances(apiclient, **kwargs):
"""List all Load balancing instances matching criteria"""

@ -343,6 +365,7 @@ def list_lb_instances(apiclient, **kwargs):
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listLoadBalancerRuleInstances(cmd))

def list_firewall_rules(apiclient, **kwargs):
"""List all Firewall Rules matching criteria"""

@ -350,6 +373,7 @@ def list_firewall_rules(apiclient, **kwargs):
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listFirewallRules(cmd))

def list_volumes(apiclient, **kwargs):
"""List all volumes matching criteria"""

@ -357,6 +381,7 @@ def list_volumes(apiclient, **kwargs):
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listVolumes(cmd))

def list_isos(apiclient, **kwargs):
"""Lists all available ISO files."""

@ -364,6 +389,7 @@ def list_isos(apiclient, **kwargs):
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listIsos(cmd))

def list_snapshots(apiclient, **kwargs):
"""List all snapshots matching criteria"""

@ -371,6 +397,7 @@ def list_snapshots(apiclient, **kwargs):
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listSnapshots(cmd))

def list_templates(apiclient, **kwargs):
"""List all templates matching criteria"""

@ -378,6 +405,7 @@ def list_templates(apiclient, **kwargs):
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listTemplates(cmd))

def list_domains(apiclient, **kwargs):
"""Lists domains"""

@ -385,6 +413,7 @@ def list_domains(apiclient, **kwargs):
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listDomains(cmd))

def list_accounts(apiclient, **kwargs):
"""Lists accounts and provides detailed account information for
listed accounts"""

@ -393,6 +422,7 @@ def list_accounts(apiclient, **kwargs):
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listAccounts(cmd))

def list_users(apiclient, **kwargs):
"""Lists users and provides detailed account information for
listed users"""

@ -401,6 +431,7 @@ def list_users(apiclient, **kwargs):
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listUsers(cmd))

def list_snapshot_policy(apiclient, **kwargs):
"""Lists snapshot policies."""

@ -408,6 +439,7 @@ def list_snapshot_policy(apiclient, **kwargs):
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listSnapshotPolicies(cmd))

def list_events(apiclient, **kwargs):
"""Lists events"""

@ -415,6 +447,7 @@ def list_events(apiclient, **kwargs):
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listEvents(cmd))

def list_disk_offering(apiclient, **kwargs):
"""Lists all available disk offerings."""

@ -422,6 +455,7 @@ def list_disk_offering(apiclient, **kwargs):
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listDiskOfferings(cmd))

def list_service_offering(apiclient, **kwargs):
"""Lists all available service offerings."""

@ -429,6 +463,7 @@ def list_service_offering(apiclient, **kwargs):
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listServiceOfferings(cmd))

def list_vlan_ipranges(apiclient, **kwargs):
"""Lists all VLAN IP ranges."""

@ -436,6 +471,7 @@ def list_vlan_ipranges(apiclient, **kwargs):
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listVlanIpRanges(cmd))

def list_usage_records(apiclient, **kwargs):
"""Lists usage records for accounts"""

@ -443,6 +479,7 @@ def list_usage_records(apiclient, **kwargs):
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listUsageRecords(cmd))

def list_nw_service_prividers(apiclient, **kwargs):
"""Lists Network service providers"""

@ -450,6 +487,7 @@ def list_nw_service_prividers(apiclient, **kwargs):
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listNetworkServiceProviders(cmd))

def list_virtual_router_elements(apiclient, **kwargs):
"""Lists Virtual Router elements"""

@ -457,6 +495,7 @@ def list_virtual_router_elements(apiclient, **kwargs):
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listVirtualRouterElements(cmd))

def list_network_offerings(apiclient, **kwargs):
"""Lists network offerings"""

@ -464,9 +503,10 @@ def list_network_offerings(apiclient, **kwargs):
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listNetworkOfferings(cmd))

def list_resource_limits(apiclient, **kwargs):
"""Lists resource limits"""

cmd = listResourceLimits.listResourceLimitsCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listResourceLimits(cmd))
return(apiclient.listResourceLimits(cmd))
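Each of the list_* wrappers above forwards arbitrary keyword arguments onto the generated *Cmd object, so a test can filter on whatever the underlying list API supports. A minimal sketch of the call pattern, assuming the usual self.apiclient, self.account and self.vm fixtures from a test's setUp (those names are not part of this diff):

routers = list_routers(
    self.apiclient,
    account=self.account.account.name,      # filter by owning account
    domainid=self.account.account.domainid
)
volumes = list_volumes(
    self.apiclient,
    virtualmachineid=self.vm.id,             # only volumes attached to this VM
    type='ROOT',
    listall=True
)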
@ -9,7 +9,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Automatically generated by addcopyright.py at 04/03/2012
"""Utilities functions
"""

@ -28,11 +28,12 @@ import imaplib
import email
import datetime

def restart_mgmt_server(server):
"""Restarts the management server"""

try:
# Get the SSH client
# Get the SSH client
ssh = is_server_ssh_ready(
server["ipaddress"],
server["port"],
@ -49,9 +50,10 @@ def restart_mgmt_server(server):
raise e
return

def fetch_latest_mail(services, from_mail):
"""Fetch mail"""

# Login to mail server to verify email
mail = imaplib.IMAP4_SSL(services["server"])
mail.login(

@ -66,21 +68,22 @@ def fetch_latest_mail(services, from_mail):
'search',
None,
'(SENTSINCE {date} HEADER FROM "{mail}")'.format(
date=date,
date=date,
mail=from_mail
)
)
# Return False if email is not present
if data == []:
return False

latest_email_uid = data[0].split()[-1]
result, data = mail.uid('fetch', latest_email_uid, '(RFC822)')
raw_email = data[0][1]
email_message = email.message_from_string(raw_email)
result = get_first_text_block(email_message)
return result

def get_first_text_block(email_message_instance):
"""fetches first text block from the mail"""
maintype = email_message_instance.get_content_maintype()
@ -91,15 +94,18 @@ def get_first_text_block(email_message_instance):
elif maintype == 'text':
return email_message_instance.get_payload()

def random_gen(size=6, chars=string.ascii_uppercase + string.digits):
"""Generate Random Strings of variable length"""
return ''.join(random.choice(chars) for x in range(size))

def cleanup_resources(api_client, resources):
"""Delete resources"""
for obj in resources:
obj.delete(api_client)

def is_server_ssh_ready(ipaddress, port, username, password, retries=50):
"""Return ssh handle else wait till sshd is running"""
loop_cnt = retries

@ -129,6 +135,7 @@ def format_volume_to_ext3(ssh_client, device="/dev/sda"):
for c in cmds:
ssh_client.execute(c)

def fetch_api_client(config_file='datacenterCfg'):
"""Fetch the Cloudstack API Client"""
config = configGenerator.get_setup_config(config_file)

@ -146,6 +153,7 @@ def fetch_api_client(config_file='datacenterCfg'):
)
)

def get_process_status(hostip, port, username, password, linklocalip, process):
"""Double hop and returns a process status"""

@ -157,20 +165,22 @@ def get_process_status(hostip, port, username, password, linklocalip, process):
password
)
ssh_command = "ssh -i ~/.ssh/id_rsa.cloud -ostricthostkeychecking=no "
ssh_command = ssh_command + "-oUserKnownHostsFile=/dev/null -p 3922 %s %s" \
% (linklocalip, process)
ssh_command = ssh_command + \
"-oUserKnownHostsFile=/dev/null -p 3922 %s %s" % (
linklocalip,
process)

# Double hop into router
timeout = 5
# Ensure the SSH login is successful
while True:
res = ssh.execute(ssh_command)

if res[0] != "Host key verification failed.":
break
elif timeout == 0:
break

time.sleep(5)
timeout = timeout - 1
return res
return res
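A minimal sketch of how a test drives this double-hop helper: SSH into the hypervisor host, then hop over the link-local address (port 3922) into the system VM or router to run a command. The host dict keys and the router's linklocalip attribute are inferred from the calls shown in this diff; the command and assertion are illustrative only.

def check_dnsmasq_on_router(self, host, router):
    # First hop: hypervisor host that runs the router VM;
    # second hop: the router's link-local address on port 3922
    res = get_process_status(
        host["ipaddress"],
        host["port"],
        host["username"],
        host["password"],
        router.linklocalip,
        "service dnsmasq status"
    )
    self.assertEqual(
        isinstance(res, list),
        True,
        "get_process_status should return the command output as a list"
    )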
@ -97,6 +97,7 @@ class Services:
|
|||
# Algorithm used for load balancing
|
||||
"privateport": 22,
|
||||
"publicport": 2222,
|
||||
"protocol": 'TCP'
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -131,12 +132,12 @@ class TestPublicIP(cloudstackTestCase):
|
|||
cls.services["network"]["zoneid"] = cls.zone.id
|
||||
|
||||
cls.network_offering = NetworkOffering.create(
|
||||
cls.api_client,
|
||||
cls.api_client,
|
||||
cls.services["network_offering"],
|
||||
)
|
||||
# Enable Network offering
|
||||
cls.network_offering.update(cls.api_client, state='Enabled')
|
||||
|
||||
|
||||
cls.services["network"]["networkoffering"] = cls.network_offering.id
|
||||
cls.account_network = Network.create(
|
||||
cls.api_client,
|
||||
|
|
@ -354,26 +355,26 @@ class TestPortForwarding(cloudstackTestCase):
|
|||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(src_nat_ip_addrs, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
src_nat_ip_addr = src_nat_ip_addrs[0]
|
||||
|
||||
|
||||
# Check if VM is in Running state before creating NAT rule
|
||||
vm_response = VirtualMachine.list(
|
||||
self.apiclient,
|
||||
id=self.virtual_machine.id
|
||||
)
|
||||
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(vm_response, list),
|
||||
True,
|
||||
"Check list VM returns a valid list"
|
||||
)
|
||||
|
||||
|
||||
self.assertNotEqual(
|
||||
len(vm_response),
|
||||
0,
|
||||
|
|
@ -384,6 +385,16 @@ class TestPortForwarding(cloudstackTestCase):
|
|||
'Running',
|
||||
"VM state should be Running before creating a NAT rule."
|
||||
)
|
||||
# Open up firewall port for SSH
|
||||
fw_rule = FireWallRule.create(
|
||||
self.apiclient,
|
||||
ipaddressid=src_nat_ip_addr.id,
|
||||
protocol=self.services["natrule"]["protocol"],
|
||||
cidrlist=['0.0.0.0/0'],
|
||||
startport=self.services["natrule"]["publicport"],
|
||||
endport=self.services["natrule"]["publicport"]
|
||||
)
|
||||
|
||||
#Create NAT rule
|
||||
nat_rule = NATRule.create(
|
||||
self.apiclient,
|
||||
|
|
@ -401,7 +412,7 @@ class TestPortForwarding(cloudstackTestCase):
|
|||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
|
||||
self.assertNotEqual(
|
||||
len(list_nat_rule_response),
|
||||
0,
|
||||
|
|
@ -419,9 +430,9 @@ class TestPortForwarding(cloudstackTestCase):
|
|||
self.virtual_machine.ipaddress,
|
||||
src_nat_ip_addr.ipaddress
|
||||
))
|
||||
|
||||
|
||||
self.virtual_machine.get_ssh_client(src_nat_ip_addr.ipaddress)
|
||||
|
||||
|
||||
except Exception as e:
|
||||
self.fail(
|
||||
"SSH Access failed for %s: %s" % \
|
||||
|
|
@ -445,7 +456,7 @@ class TestPortForwarding(cloudstackTestCase):
|
|||
self.debug(
|
||||
"SSHing into VM with IP address %s after NAT rule deletion" %
|
||||
self.virtual_machine.ipaddress)
|
||||
|
||||
|
||||
remoteSSHClient.remoteSSHClient(
|
||||
src_nat_ip_addr.ipaddress,
|
||||
self.virtual_machine.ssh_port,
|
||||
|
|
@ -469,19 +480,19 @@ class TestPortForwarding(cloudstackTestCase):
|
|||
self.services["server"]
|
||||
)
|
||||
self.cleanup.append(ip_address)
|
||||
|
||||
|
||||
# Check if VM is in Running state before creating NAT rule
|
||||
vm_response = VirtualMachine.list(
|
||||
self.apiclient,
|
||||
id=self.virtual_machine.id
|
||||
)
|
||||
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(vm_response, list),
|
||||
True,
|
||||
"Check list VM returns a valid list"
|
||||
)
|
||||
|
||||
|
||||
self.assertNotEqual(
|
||||
len(vm_response),
|
||||
0,
|
||||
|
|
@ -492,7 +503,15 @@ class TestPortForwarding(cloudstackTestCase):
|
|||
'Running',
|
||||
"VM state should be Running before creating a NAT rule."
|
||||
)
|
||||
|
||||
# Open up firewall port for SSH
|
||||
fw_rule = FireWallRule.create(
|
||||
self.apiclient,
|
||||
ipaddressid=ip_address.ipaddress.id,
|
||||
protocol=self.services["natrule"]["protocol"],
|
||||
cidrlist=['0.0.0.0/0'],
|
||||
startport=self.services["natrule"]["publicport"],
|
||||
endport=self.services["natrule"]["publicport"]
|
||||
)
|
||||
#Create NAT rule
|
||||
nat_rule = NATRule.create(
|
||||
self.apiclient,
|
||||
|
|
@ -553,7 +572,7 @@ class TestPortForwarding(cloudstackTestCase):
|
|||
self.debug(
|
||||
"SSHing into VM with IP address %s after NAT rule deletion" %
|
||||
self.virtual_machine.ipaddress)
|
||||
|
||||
|
||||
remoteSSHClient.remoteSSHClient(
|
||||
ip_address.ipaddress.ipaddress,
|
||||
self.virtual_machine.ssh_port,
|
||||
|
|
@ -614,6 +633,15 @@ class TestLoadBalancingRule(cloudstackTestCase):
|
|||
cls.account.account.domainid,
|
||||
cls.services["server"]
|
||||
)
|
||||
# Open up firewall port for SSH
|
||||
cls.fw_rule = FireWallRule.create(
|
||||
cls.api_client,
|
||||
ipaddressid=cls.non_src_nat_ip.ipaddress.id,
|
||||
protocol=cls.services["lbrule"]["protocol"],
|
||||
cidrlist=['0.0.0.0/0'],
|
||||
startport=cls.services["lbrule"]["publicport"],
|
||||
endport=cls.services["lbrule"]["publicport"]
|
||||
)
|
||||
cls._cleanup = [
|
||||
cls.account,
|
||||
cls.service_offering
|
||||
|
|
@ -653,20 +681,20 @@ class TestLoadBalancingRule(cloudstackTestCase):
|
|||
"Check list response returns a valid list"
|
||||
)
|
||||
src_nat_ip_addr = src_nat_ip_addrs[0]
|
||||
|
||||
|
||||
# Check if VM is in Running state before creating LB rule
|
||||
vm_response = VirtualMachine.list(
|
||||
self.apiclient,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(vm_response, list),
|
||||
True,
|
||||
"Check list VM returns a valid list"
|
||||
)
|
||||
|
||||
|
||||
self.assertNotEqual(
|
||||
len(vm_response),
|
||||
0,
|
||||
|
|
@ -678,7 +706,7 @@ class TestLoadBalancingRule(cloudstackTestCase):
|
|||
'Running',
|
||||
"VM state should be Running before creating a NAT rule."
|
||||
)
|
||||
|
||||
|
||||
#Create Load Balancer rule and assign VMs to rule
|
||||
lb_rule = LoadBalancerRule.create(
|
||||
self.apiclient,
|
||||
|
|
@ -727,12 +755,12 @@ class TestLoadBalancingRule(cloudstackTestCase):
|
|||
0,
|
||||
"Check Load Balancer instances Rule in its List"
|
||||
)
|
||||
self.debug("lb_instance_rules Ids: %s, %s" % (
|
||||
self.debug("lb_instance_rules Ids: %s, %s" % (
|
||||
lb_instance_rules[0].id,
|
||||
lb_instance_rules[1].id
|
||||
))
|
||||
self.debug("VM ids: %s, %s" % (self.vm_1.id, self.vm_2.id))
|
||||
|
||||
self.debug("VM ids: %s, %s" % (self.vm_1.id, self.vm_2.id))
|
||||
|
||||
self.assertIn(
|
||||
lb_instance_rules[0].id,
|
||||
[self.vm_1.id, self.vm_2.id],
|
||||
|
|
@ -746,35 +774,35 @@ class TestLoadBalancingRule(cloudstackTestCase):
|
|||
)
|
||||
try:
|
||||
self.debug(
|
||||
"SSH into VM (IPaddress: %s) & NAT Rule (Public IP: %s)"%
|
||||
"SSH into VM (IPaddress: %s) & NAT Rule (Public IP: %s)" %
|
||||
(self.vm_1.ipaddress, src_nat_ip_addr.ipaddress)
|
||||
)
|
||||
|
||||
|
||||
ssh_1 = remoteSSHClient.remoteSSHClient(
|
||||
src_nat_ip_addr.ipaddress,
|
||||
self.services['lbrule']["publicport"],
|
||||
self.vm_1.username,
|
||||
self.vm_1.password
|
||||
)
|
||||
|
||||
|
||||
# If Round Robin Algorithm is chosen,
|
||||
# each ssh command should alternate between VMs
|
||||
hostnames = [ssh_1.execute("hostname")[0]]
|
||||
|
||||
|
||||
except Exception as e:
|
||||
self.fail("%s: SSH failed for VM with IP Address: %s" %
|
||||
self.fail("%s: SSH failed for VM with IP Address: %s" %
|
||||
(e, src_nat_ip_addr.ipaddress))
|
||||
|
||||
time.sleep(self.services["lb_switch_wait"])
|
||||
|
||||
|
||||
try:
|
||||
self.debug("SSHing into IP address: %s after adding VMs (ID: %s , %s)" %
|
||||
self.debug("SSHing into IP address: %s after adding VMs (ID: %s , %s)" %
|
||||
(
|
||||
src_nat_ip_addr.ipaddress,
|
||||
self.vm_1.id,
|
||||
self.vm_2.id
|
||||
))
|
||||
|
||||
|
||||
ssh_2 = remoteSSHClient.remoteSSHClient(
|
||||
src_nat_ip_addr.ipaddress,
|
||||
self.services['lbrule']["publicport"],
|
||||
|
|
@ -784,7 +812,7 @@ class TestLoadBalancingRule(cloudstackTestCase):
|
|||
hostnames.append(ssh_2.execute("hostname")[0])
|
||||
|
||||
except Exception as e:
|
||||
self.fail("%s: SSH failed for VM with IP Address: %s" %
|
||||
self.fail("%s: SSH failed for VM with IP Address: %s" %
|
||||
(e, src_nat_ip_addr.ipaddress))
|
||||
|
||||
self.debug("Hostnames: %s" % str(hostnames))
|
||||
|
|
@ -802,23 +830,23 @@ class TestLoadBalancingRule(cloudstackTestCase):
|
|||
#SSH should pass till there is a last VM associated with LB rule
|
||||
lb_rule.remove(self.apiclient, [self.vm_2])
|
||||
try:
|
||||
self.debug("SSHing into IP address: %s after removing VM (ID: %s)" %
|
||||
self.debug("SSHing into IP address: %s after removing VM (ID: %s)" %
|
||||
(
|
||||
src_nat_ip_addr.ipaddress,
|
||||
self.vm_2.id
|
||||
))
|
||||
|
||||
|
||||
ssh_1 = remoteSSHClient.remoteSSHClient(
|
||||
src_nat_ip_addr.ipaddress,
|
||||
self.services['lbrule']["publicport"],
|
||||
self.vm_1.username,
|
||||
self.vm_1.password
|
||||
)
|
||||
|
||||
|
||||
hostnames.append(ssh_1.execute("hostname")[0])
|
||||
|
||||
|
||||
except Exception as e:
|
||||
self.fail("%s: SSH failed for VM with IP Address: %s" %
|
||||
self.fail("%s: SSH failed for VM with IP Address: %s" %
|
||||
(e, src_nat_ip_addr.ipaddress))
|
||||
|
||||
self.assertIn(
|
||||
|
|
@ -828,7 +856,7 @@ class TestLoadBalancingRule(cloudstackTestCase):
|
|||
)
|
||||
|
||||
lb_rule.remove(self.apiclient, [self.vm_1])
|
||||
|
||||
|
||||
with self.assertRaises(Exception):
|
||||
self.debug("Removed all VMs, trying to SSH")
|
||||
ssh_1 = remoteSSHClient.remoteSSHClient(
|
||||
|
|
@ -848,20 +876,20 @@ class TestLoadBalancingRule(cloudstackTestCase):
|
|||
#2. attempt to ssh twice on the load balanced IP
|
||||
#3. verify using the hostname of the VM that
|
||||
# round robin is indeed happening as expected
|
||||
|
||||
|
||||
# Check if VM is in Running state before creating LB rule
|
||||
vm_response = VirtualMachine.list(
|
||||
self.apiclient,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(vm_response, list),
|
||||
True,
|
||||
"Check list VM returns a valid list"
|
||||
)
|
||||
|
||||
|
||||
self.assertNotEqual(
|
||||
len(vm_response),
|
||||
0,
|
||||
|
|
@ -873,7 +901,7 @@ class TestLoadBalancingRule(cloudstackTestCase):
|
|||
'Running',
|
||||
"VM state should be Running before creating a NAT rule."
|
||||
)
|
||||
|
||||
|
||||
#Create Load Balancer rule and assign VMs to rule
|
||||
lb_rule = LoadBalancerRule.create(
|
||||
self.apiclient,
|
||||
|
|
@ -889,7 +917,7 @@ class TestLoadBalancingRule(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
id=lb_rule.id
|
||||
)
|
||||
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(lb_rules, list),
|
||||
True,
|
||||
|
|
@ -935,7 +963,7 @@ class TestLoadBalancingRule(cloudstackTestCase):
|
|||
"Check List Load Balancer instances Rules returns valid VM ID"
|
||||
)
|
||||
try:
|
||||
self.debug("SSHing into IP address: %s after adding VMs (ID: %s , %s)" %
|
||||
self.debug("SSHing into IP address: %s after adding VMs (ID: %s , %s)" %
|
||||
(
|
||||
self.non_src_nat_ip.ipaddress.ipaddress,
|
||||
self.vm_1.id,
|
||||
|
|
@ -951,10 +979,10 @@ class TestLoadBalancingRule(cloudstackTestCase):
|
|||
# If Round Robin Algorithm is chosen,
|
||||
# each ssh command should alternate between VMs
|
||||
hostnames = [ssh_1.execute("hostname")[0]]
|
||||
|
||||
|
||||
time.sleep(self.services["lb_switch_wait"])
|
||||
|
||||
self.debug("SSHing again into IP address: %s with VMs (ID: %s , %s) added to LB rule" %
|
||||
|
||||
self.debug("SSHing again into IP address: %s with VMs (ID: %s , %s) added to LB rule" %
|
||||
(
|
||||
self.non_src_nat_ip.ipaddress.ipaddress,
|
||||
self.vm_1.id,
|
||||
|
|
@ -966,7 +994,7 @@ class TestLoadBalancingRule(cloudstackTestCase):
|
|||
self.vm_1.username,
|
||||
self.vm_1.password
|
||||
)
|
||||
|
||||
|
||||
hostnames.append(ssh_2.execute("hostname")[0])
|
||||
self.debug("Hostnames after adding 2 VMs to LB rule: %s" % str(hostnames))
|
||||
self.assertIn(
|
||||
|
|
@ -982,8 +1010,8 @@ class TestLoadBalancingRule(cloudstackTestCase):
|
|||
|
||||
#SSH should pass till there is a last VM associated with LB rule
|
||||
lb_rule.remove(self.apiclient, [self.vm_2])
|
||||
|
||||
self.debug("SSHing into IP address: %s after removing VM (ID: %s) from LB rule" %
|
||||
|
||||
self.debug("SSHing into IP address: %s after removing VM (ID: %s) from LB rule" %
|
||||
(
|
||||
self.non_src_nat_ip.ipaddress.ipaddress,
|
||||
self.vm_2.id
|
||||
|
|
@ -994,11 +1022,11 @@ class TestLoadBalancingRule(cloudstackTestCase):
|
|||
self.vm_1.username,
|
||||
self.vm_1.password
|
||||
)
|
||||
|
||||
|
||||
hostnames.append(ssh_1.execute("hostname")[0])
|
||||
self.debug("Hostnames after removing VM2: %s" % str(hostnames))
|
||||
except Exception as e:
|
||||
self.fail("%s: SSH failed for VM with IP Address: %s" %
|
||||
self.fail("%s: SSH failed for VM with IP Address: %s" %
|
||||
(e, self.non_src_nat_ip.ipaddress.ipaddress))
|
||||
|
||||
self.assertIn(
|
||||
|
|
@ -1009,7 +1037,7 @@ class TestLoadBalancingRule(cloudstackTestCase):
|
|||
|
||||
lb_rule.remove(self.apiclient, [self.vm_1])
|
||||
with self.assertRaises(Exception):
|
||||
self.fail("SSHing into IP address: %s after removing VM (ID: %s) from LB rule" %
|
||||
self.fail("SSHing into IP address: %s after removing VM (ID: %s) from LB rule" %
|
||||
(
|
||||
self.non_src_nat_ip.ipaddress.ipaddress,
|
||||
self.vm_1.id
|
||||
|
|
@ -1060,7 +1088,7 @@ class TestRebootRouter(cloudstackTestCase):
|
|||
domainid=self.account.account.domainid,
|
||||
serviceofferingid=self.service_offering.id
|
||||
)
|
||||
|
||||
|
||||
src_nat_ip_addrs = list_publicIP(
|
||||
self.apiclient,
|
||||
account=self.account.account.name,
|
||||
|
|
@ -1070,7 +1098,7 @@ class TestRebootRouter(cloudstackTestCase):
|
|||
src_nat_ip_addr = src_nat_ip_addrs[0]
|
||||
except Exception as e:
|
||||
raise Exception("Warning: Exception during fetching source NAT: %s" % e)
|
||||
|
||||
|
||||
self.public_ip = PublicIPAddress.create(
|
||||
self.apiclient,
|
||||
self.vm_1.account,
|
||||
|
|
@ -1078,6 +1106,15 @@ class TestRebootRouter(cloudstackTestCase):
|
|||
self.vm_1.domainid,
|
||||
self.services["server"]
|
||||
)
|
||||
# Open up firewall port for SSH
|
||||
fw_rule = FireWallRule.create(
|
||||
self.apiclient,
|
||||
ipaddressid=self.public_ip.ipaddress.id,
|
||||
protocol=self.services["lbrule"]["protocol"],
|
||||
cidrlist=['0.0.0.0/0'],
|
||||
startport=self.services["lbrule"]["publicport"],
|
||||
endport=self.services["lbrule"]["publicport"]
|
||||
)
|
||||
|
||||
lb_rule = LoadBalancerRule.create(
|
||||
self.apiclient,
|
||||
|
|
@ -1120,44 +1157,44 @@ class TestRebootRouter(cloudstackTestCase):
|
|||
True,
|
||||
"Check list routers returns a valid list"
|
||||
)
|
||||
|
||||
|
||||
router = routers[0]
|
||||
|
||||
|
||||
self.debug("Rebooting the router (ID: %s)" % router.id)
|
||||
|
||||
|
||||
cmd = rebootRouter.rebootRouterCmd()
|
||||
cmd.id = router.id
|
||||
self.apiclient.rebootRouter(cmd)
|
||||
|
||||
|
||||
# Poll listVM to ensure VM is stopped properly
|
||||
timeout = self.services["timeout"]
|
||||
|
||||
|
||||
while True:
|
||||
time.sleep(self.services["sleep"])
|
||||
|
||||
|
||||
# Ensure that VM is in stopped state
|
||||
list_vm_response = list_virtual_machines(
|
||||
self.apiclient,
|
||||
id=self.vm_1.id
|
||||
)
|
||||
|
||||
|
||||
if isinstance(list_vm_response, list):
|
||||
|
||||
|
||||
vm = list_vm_response[0]
|
||||
if vm.state == 'Running':
|
||||
self.debug("VM state: %s" % vm.state)
|
||||
break
|
||||
|
||||
if timeout == 0:
|
||||
|
||||
if timeout == 0:
|
||||
raise Exception(
|
||||
"Failed to start VM (ID: %s) in change service offering" % vm.id)
|
||||
|
||||
|
||||
timeout = timeout - 1
|
||||
|
||||
#we should be able to SSH after successful reboot
|
||||
try:
|
||||
self.debug("SSH into VM (ID : %s ) after reboot" % self.vm_1.id)
|
||||
|
||||
|
||||
remoteSSHClient.remoteSSHClient(
|
||||
self.nat_rule.ipaddress,
|
||||
self.services["natrule"]["publicport"],
|
||||
|
|
@ -1258,20 +1295,30 @@ class TestAssignRemoveLB(cloudstackTestCase):
|
|||
"Check list response returns a valid list"
|
||||
)
|
||||
self.non_src_nat_ip = src_nat_ip_addrs[0]
|
||||
|
||||
|
||||
# Open up firewall port for SSH
|
||||
fw_rule = FireWallRule.create(
|
||||
self.apiclient,
|
||||
ipaddressid=self.non_src_nat_ip.id,
|
||||
protocol=self.services["lbrule"]["protocol"],
|
||||
cidrlist=['0.0.0.0/0'],
|
||||
startport=self.services["lbrule"]["publicport"],
|
||||
endport=self.services["lbrule"]["publicport"]
|
||||
)
|
||||
|
||||
# Check if VM is in Running state before creating LB rule
|
||||
vm_response = VirtualMachine.list(
|
||||
self.apiclient,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(vm_response, list),
|
||||
True,
|
||||
"Check list VM returns a valid list"
|
||||
)
|
||||
|
||||
|
||||
self.assertNotEqual(
|
||||
len(vm_response),
|
||||
0,
|
||||
|
|
@ -1283,7 +1330,7 @@ class TestAssignRemoveLB(cloudstackTestCase):
|
|||
'Running',
|
||||
"VM state should be Running before creating a NAT rule."
|
||||
)
|
||||
|
||||
|
||||
lb_rule = LoadBalancerRule.create(
|
||||
self.apiclient,
|
||||
self.services["lbrule"],
|
||||
|
|
@ -1291,9 +1338,9 @@ class TestAssignRemoveLB(cloudstackTestCase):
|
|||
self.account.account.name
|
||||
)
|
||||
lb_rule.assign(self.apiclient, [self.vm_1, self.vm_2])
|
||||
|
||||
|
||||
try:
|
||||
self.debug("SSHing into IP address: %s with VMs (ID: %s , %s) added to LB rule" %
|
||||
self.debug("SSHing into IP address: %s with VMs (ID: %s , %s) added to LB rule" %
|
||||
(
|
||||
self.non_src_nat_ip.ipaddress,
|
||||
self.vm_1.id,
|
||||
|
|
@ -1307,11 +1354,11 @@ class TestAssignRemoveLB(cloudstackTestCase):
|
|||
self.vm_1.password
|
||||
)
|
||||
except Exception as e:
|
||||
self.fail("SSH failed for VM with IP: %s" %
|
||||
self.fail("SSH failed for VM with IP: %s" %
|
||||
self.non_src_nat_ip.ipaddress)
|
||||
|
||||
|
||||
try:
|
||||
self.debug("SSHing again into IP address: %s with VMs (ID: %s , %s) added to LB rule" %
|
||||
self.debug("SSHing again into IP address: %s with VMs (ID: %s , %s) added to LB rule" %
|
||||
(
|
||||
self.non_src_nat_ip.ipaddress,
|
||||
self.vm_1.id,
|
||||
|
|
@ -1323,19 +1370,19 @@ class TestAssignRemoveLB(cloudstackTestCase):
|
|||
self.vm_2.username,
|
||||
self.vm_2.password
|
||||
)
|
||||
|
||||
|
||||
# If Round Robin Algorithm is chosen,
|
||||
# each ssh command should alternate between VMs
|
||||
res_1 = ssh_1.execute("hostname")[0]
|
||||
self.debug(res_1)
|
||||
|
||||
|
||||
time.sleep(self.services["lb_switch_wait"])
|
||||
|
||||
|
||||
res_2 = ssh_2.execute("hostname")[0]
|
||||
self.debug(res_2)
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH failed for VM with IP: %s" %
|
||||
self.fail("SSH failed for VM with IP: %s" %
|
||||
self.non_src_nat_ip.ipaddress)
|
||||
|
||||
self.assertIn(
|
||||
|
|
@ -1351,9 +1398,9 @@ class TestAssignRemoveLB(cloudstackTestCase):
|
|||
|
||||
#Removing VM and assigning another VM to LB rule
|
||||
lb_rule.remove(self.apiclient, [self.vm_2])
|
||||
|
||||
|
||||
try:
|
||||
self.debug("SSHing again into IP address: %s with VM (ID: %s) added to LB rule" %
|
||||
self.debug("SSHing again into IP address: %s with VM (ID: %s) added to LB rule" %
|
||||
(
|
||||
self.non_src_nat_ip.ipaddress,
|
||||
self.vm_1.id,
|
||||
|
|
@ -1364,14 +1411,14 @@ class TestAssignRemoveLB(cloudstackTestCase):
|
|||
self.services["lbrule"]["publicport"],
|
||||
self.vm_1.username,
|
||||
self.vm_1.password
|
||||
)
|
||||
)
|
||||
res_1 = ssh_1.execute("hostname")[0]
|
||||
self.debug(res_1)
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH failed for VM with IP: %s" %
|
||||
self.fail("SSH failed for VM with IP: %s" %
|
||||
self.non_src_nat_ip.ipaddress)
|
||||
|
||||
|
||||
self.assertIn(
|
||||
self.vm_1.name,
|
||||
res_1,
|
||||
|
|
@ -1379,7 +1426,7 @@ class TestAssignRemoveLB(cloudstackTestCase):
|
|||
)
|
||||
|
||||
lb_rule.assign(self.apiclient, [self.vm_3])
|
||||
|
||||
|
||||
try:
|
||||
ssh_1 = remoteSSHClient.remoteSSHClient(
|
||||
self.non_src_nat_ip.ipaddress,
|
||||
|
|
@ -1393,19 +1440,19 @@ class TestAssignRemoveLB(cloudstackTestCase):
|
|||
self.vm_3.username,
|
||||
self.vm_3.password
|
||||
)
|
||||
|
||||
|
||||
res_1 = ssh_1.execute("hostname")[0]
|
||||
self.debug(res_1)
|
||||
|
||||
|
||||
time.sleep(self.services["lb_switch_wait"])
|
||||
|
||||
|
||||
res_3 = ssh_3.execute("hostname")[0]
|
||||
self.debug(res_3)
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH failed for VM with IP: %s" %
|
||||
self.fail("SSH failed for VM with IP: %s" %
|
||||
self.non_src_nat_ip.ipaddress)
|
||||
|
||||
|
||||
self.assertIn(
|
||||
self.vm_1.name,
|
||||
res_1,
|
||||
|
|
@ -1478,7 +1525,7 @@ class TestReleaseIP(cloudstackTestCase):
|
|||
except Exception as e:
|
||||
raise Exception("Failed: During acquiring source NAT for account: %s" %
|
||||
self.account.account.name)
|
||||
|
||||
|
||||
self.nat_rule = NATRule.create(
|
||||
self.apiclient,
|
||||
self.virtual_machine,
|
||||
|
|
@ -1502,21 +1549,21 @@ class TestReleaseIP(cloudstackTestCase):
|
|||
|
||||
def test_releaseIP(self):
|
||||
"""Test for Associate/Disassociate public IP address"""
|
||||
|
||||
|
||||
self.debug("Deleting Public IP : %s" % self.ip_addr.id)
|
||||
|
||||
|
||||
self.ip_address.delete(self.apiclient)
|
||||
|
||||
|
||||
# Sleep to ensure that deleted state is reflected in other calls
|
||||
time.sleep(self.services["sleep"])
|
||||
|
||||
|
||||
# ListPublicIpAddresses should not list deleted Public IP address
|
||||
list_pub_ip_addr_resp = list_publicIP(
|
||||
self.apiclient,
|
||||
id=self.ip_addr.id
|
||||
)
|
||||
self.debug("List Public IP response" + str(list_pub_ip_addr_resp))
|
||||
|
||||
|
||||
self.assertEqual(
|
||||
list_pub_ip_addr_resp,
|
||||
None,
|
||||
|
|
@ -1543,7 +1590,7 @@ class TestReleaseIP(cloudstackTestCase):
|
|||
id=self.lb_rule.id
|
||||
)
|
||||
self.debug("List LB Rule response" + str(list_lb_rule))
|
||||
|
||||
|
||||
self.assertEqual(
|
||||
list_lb_rule,
|
||||
None,
|
||||
|
|
@ -1603,12 +1650,12 @@ class TestDeleteAccount(cloudstackTestCase):
|
|||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
|
||||
|
||||
try:
|
||||
src_nat_ip_addr = src_nat_ip_addrs[0]
|
||||
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH failed for VM with IP: %s" %
|
||||
self.fail("SSH failed for VM with IP: %s" %
|
||||
src_nat_ip_addr.ipaddress)
|
||||
|
||||
self.lb_rule = LoadBalancerRule.create(
|
||||
|
|
@ -1618,7 +1665,7 @@ class TestDeleteAccount(cloudstackTestCase):
|
|||
self.account.account.name
|
||||
)
|
||||
self.lb_rule.assign(self.apiclient, [self.vm_1])
|
||||
|
||||
|
||||
self.nat_rule = NATRule.create(
|
||||
self.apiclient,
|
||||
self.vm_1,
|
||||
|
|
@ -1666,14 +1713,14 @@ class TestDeleteAccount(cloudstackTestCase):
|
|||
"Check load balancing rule is properly deleted."
|
||||
)
|
||||
except Exception as e:
|
||||
|
||||
|
||||
raise Exception(
|
||||
"Exception raised while fetching LB rules for account: %s" %
|
||||
self.account.account.name)
|
||||
# ListPortForwardingRules should not
|
||||
# list associated rules with deleted account
|
||||
try:
|
||||
list_nat_reponse= list_nat_rules(
|
||||
list_nat_reponse = list_nat_rules(
|
||||
self.apiclient,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
|
|
@ -1684,7 +1731,7 @@ class TestDeleteAccount(cloudstackTestCase):
|
|||
"Check load balancing rule is properly deleted."
|
||||
)
|
||||
except Exception as e:
|
||||
|
||||
|
||||
raise Exception(
|
||||
"Exception raised while fetching NAT rules for account: %s" %
|
||||
self.account.account.name)
|
||||
|
|
@ -1701,7 +1748,7 @@ class TestDeleteAccount(cloudstackTestCase):
|
|||
"Check routers are properly deleted."
|
||||
)
|
||||
except Exception as e:
|
||||
|
||||
|
||||
raise Exception(
|
||||
"Exception raised while fetching routers for account: %s" %
|
||||
self.account.account.name)
|
||||
|
|
@ -21,7 +21,6 @@ from marvin.remoteSSHClient import remoteSSHClient
from integration.lib.utils import *
from integration.lib.base import *
from integration.lib.common import *

#Import System modules
import time

@ -57,7 +56,7 @@ class Services:
"username": "testuser",
"password": "fr3sca",
},
"ostypeid":'5776c0d2-f331-42db-ba3a-29f1f8319bc9',
"ostypeid":'946b031b-0e10-4f4a-a3fc-d212ae2ea07f',
"sleep": 60,
"timeout": 10,
"mode": 'advanced', #Networking mode: Basic, Advanced
@ -17,10 +17,10 @@
import marvin
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.remoteSSHClient import remoteSSHClient
from integration.lib.utils import *
from integration.lib.base import *
from integration.lib.common import *
from marvin.remoteSSHClient import remoteSSHClient

class Services:

@ -1017,7 +1017,7 @@ class TestSnapshots(cloudstackTestCase):
self.services["sub_lvl_dir2"],
self.services["random_data"]
),
"sync",
"sync",
]

for c in cmds:
@ -17,6 +17,7 @@
import marvin
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.remoteSSHClient import remoteSSHClient
from integration.lib.utils import *
from integration.lib.base import *
from integration.lib.common import *

@ -69,12 +70,12 @@ class Services:
"template_1": {
"displaytext": "Cent OS Template",
"name": "Cent OS Template",
"ostypeid": '5776c0d2-f331-42db-ba3a-29f1f8319bc9',
"ostypeid": '946b031b-0e10-4f4a-a3fc-d212ae2ea07f',
},
"template_2": {
"displaytext": "Public Template",
"name": "Public template",
"ostypeid": '5776c0d2-f331-42db-ba3a-29f1f8319bc9',
"ostypeid": '946b031b-0e10-4f4a-a3fc-d212ae2ea07f',
"isfeatured": True,
"ispublic": True,
"isextractable": True,

@ -88,7 +89,7 @@ class Services:
"isextractable": False,
"bootable": True,
"passwordenabled": True,
"ostypeid": '5776c0d2-f331-42db-ba3a-29f1f8319bc9',
"ostypeid": '946b031b-0e10-4f4a-a3fc-d212ae2ea07f',
"mode": 'advanced',
# Networking mode: Advanced, basic
"sleep": 30,
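A minimal sketch of how the updated "ostypeid" values above are consumed elsewhere in this change set: get_template (from the integration.lib.common helpers) resolves the registered template whose guest OS matches that UUID for the target zone. The variable names are illustrative assumptions, not part of this diff.

zone = get_zone(cls.api_client, cls.services)
template = get_template(
    cls.api_client,
    zone.id,
    cls.services["ostypeid"]    # the guest OS UUID updated by this commit
)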
|
|||
|
|
@ -107,6 +107,11 @@ class Services:
|
|||
"ostypeid": '5776c0d2-f331-42db-ba3a-29f1f8319bc9',
|
||||
"mode": 'HTTP_DOWNLOAD', # Downloading existing ISO
|
||||
},
|
||||
"template": {
|
||||
"displaytext": "Cent OS Template",
|
||||
"name": "Cent OS Template",
|
||||
"passwordenabled": True,
|
||||
},
|
||||
"diskdevice": '/dev/xvdd',
|
||||
# Disk device where ISO is attached to instance
|
||||
"mount_dir": "/mnt/tmp",
|
||||
|
|
@ -186,20 +191,20 @@ class TestDeployVM(cloudstackTestCase):
|
|||
"Verify listVirtualMachines response for virtual machine: %s" \
|
||||
% self.virtual_machine.id
|
||||
)
|
||||
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_vm_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
|
||||
self.assertNotEqual(
|
||||
len(list_vm_response),
|
||||
0,
|
||||
"Check VM available in List Virtual Machines"
|
||||
)
|
||||
vm_response = list_vm_response[0]
|
||||
|
||||
|
||||
self.assertEqual(
|
||||
|
||||
vm_response.id,
|
||||
|
|
@ -323,7 +328,7 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
id=self.small_virtual_machine.id
|
||||
)
|
||||
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_vm_response, list),
|
||||
True,
|
||||
|
|
@ -348,7 +353,7 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
# Validate the following
|
||||
# 1. listVM command should return this VM.State
|
||||
# of this VM should be Running".
|
||||
|
||||
|
||||
self.debug("Starting VM - ID: %s" % self.virtual_machine.id)
|
||||
self.small_virtual_machine.start(self.apiclient)
|
||||
|
||||
|
|
@ -361,7 +366,7 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
|
||||
self.assertNotEqual(
|
||||
len(list_vm_response),
|
||||
0,
|
||||
|
|
@ -400,7 +405,7 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
|
||||
self.assertNotEqual(
|
||||
len(list_vm_response),
|
||||
0,
|
||||
|
|
@ -423,39 +428,39 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
# this Vm matches the one specified for "Small" service offering.
|
||||
# 2. Using listVM command verify that this Vm
|
||||
# has Small service offering Id.
|
||||
|
||||
|
||||
self.debug("Stopping VM - ID: %s" % self.medium_virtual_machine.id)
|
||||
|
||||
|
||||
self.medium_virtual_machine.stop(self.apiclient)
|
||||
|
||||
|
||||
# Poll listVM to ensure VM is stopped properly
|
||||
timeout = self.services["timeout"]
|
||||
|
||||
|
||||
while True:
|
||||
time.sleep(self.services["sleep"])
|
||||
|
||||
|
||||
# Ensure that VM is in stopped state
|
||||
list_vm_response = list_virtual_machines(
|
||||
self.apiclient,
|
||||
id=self.medium_virtual_machine.id
|
||||
)
|
||||
|
||||
|
||||
if isinstance(list_vm_response, list):
|
||||
|
||||
|
||||
vm = list_vm_response[0]
|
||||
if vm.state == 'Stopped':
|
||||
self.debug("VM state: %s" % vm.state)
|
||||
break
|
||||
|
||||
if timeout == 0:
|
||||
|
||||
if timeout == 0:
|
||||
raise Exception(
|
||||
"Failed to stop VM (ID: %s) in change service offering" % vm.id)
|
||||
|
||||
|
||||
timeout = timeout - 1
|
||||
|
||||
self.debug("Change Service offering VM - ID: %s" %
|
||||
|
||||
self.debug("Change Service offering VM - ID: %s" %
|
||||
self.medium_virtual_machine.id)
|
||||
|
||||
|
||||
cmd = changeServiceForVirtualMachine.changeServiceForVirtualMachineCmd()
|
||||
cmd.id = self.medium_virtual_machine.id
|
||||
cmd.serviceofferingid = self.small_offering.id
|
||||
|
|
@ -463,32 +468,32 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
|
||||
self.debug("Starting VM - ID: %s" % self.medium_virtual_machine.id)
|
||||
self.medium_virtual_machine.start(self.apiclient)
|
||||
|
||||
|
||||
# Poll listVM to ensure VM is started properly
|
||||
timeout = self.services["timeout"]
|
||||
|
||||
|
||||
while True:
|
||||
time.sleep(self.services["sleep"])
|
||||
|
||||
|
||||
# Ensure that VM is in running state
|
||||
list_vm_response = list_virtual_machines(
|
||||
self.apiclient,
|
||||
id=self.medium_virtual_machine.id
|
||||
)
|
||||
|
||||
|
||||
if isinstance(list_vm_response, list):
|
||||
|
||||
|
||||
vm = list_vm_response[0]
|
||||
if vm.state == 'Running':
|
||||
self.debug("VM state: %s" % vm.state)
|
||||
break
|
||||
|
||||
if timeout == 0:
|
||||
|
||||
if timeout == 0:
|
||||
raise Exception(
|
||||
"Failed to start VM (ID: %s) after changing service offering" % vm.id)
|
||||
|
||||
|
||||
timeout = timeout - 1
|
||||
|
||||
|
||||
try:
|
||||
ssh = self.medium_virtual_machine.get_ssh_client()
|
||||
except Exception as e:
|
||||
|
|
@ -506,7 +511,7 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
meminfo = ssh.execute("cat /proc/meminfo")
|
||||
#MemTotal: 1017464 kB
|
||||
total_mem = [i for i in meminfo if "MemTotal" in i][0].split()[1]
|
||||
|
||||
|
||||
self.debug(
|
||||
"CPU count: %s, CPU Speed: %s, Mem Info: %s" % (
|
||||
cpu_cnt,
|
||||
|
|
@ -539,76 +544,76 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
# this Vm matches the one specified for "Medium" service offering.
|
||||
# 2. Using listVM command verify that this Vm
|
||||
# has Medium service offering Id.
|
||||
|
||||
|
||||
self.debug("Stopping VM - ID: %s" % self.small_virtual_machine.id)
|
||||
self.small_virtual_machine.stop(self.apiclient)
|
||||
|
||||
|
||||
# Poll listVM to ensure VM is stopped properly
|
||||
timeout = self.services["timeout"]
|
||||
|
||||
|
||||
while True:
|
||||
time.sleep(self.services["sleep"])
|
||||
|
||||
|
||||
# Ensure that VM is in stopped state
|
||||
list_vm_response = list_virtual_machines(
|
||||
self.apiclient,
|
||||
id=self.small_virtual_machine.id
|
||||
)
|
||||
|
||||
|
||||
if isinstance(list_vm_response, list):
|
||||
|
||||
|
||||
vm = list_vm_response[0]
|
||||
if vm.state == 'Stopped':
|
||||
self.debug("VM state: %s" % vm.state)
|
||||
break
|
||||
|
||||
if timeout == 0:
|
||||
|
||||
if timeout == 0:
|
||||
raise Exception(
|
||||
"Failed to stop VM (ID: %s) in change service offering" % vm.id)
|
||||
|
||||
|
||||
timeout = timeout - 1
|
||||
|
||||
self.debug("Change service offering VM - ID: %s" %
|
||||
|
||||
self.debug("Change service offering VM - ID: %s" %
|
||||
self.small_virtual_machine.id)
|
||||
|
||||
|
||||
cmd = changeServiceForVirtualMachine.changeServiceForVirtualMachineCmd()
|
||||
cmd.id = self.small_virtual_machine.id
|
||||
cmd.serviceofferingid = self.medium_offering.id
|
||||
self.apiclient.changeServiceForVirtualMachine(cmd)
|
||||
|
||||
|
||||
self.debug("Starting VM - ID: %s" % self.small_virtual_machine.id)
|
||||
self.small_virtual_machine.start(self.apiclient)
|
||||
|
||||
|
||||
# Poll listVM to ensure VM is started properly
|
||||
timeout = self.services["timeout"]
|
||||
|
||||
|
||||
while True:
|
||||
time.sleep(self.services["sleep"])
|
||||
|
||||
|
||||
# Ensure that VM is in running state
|
||||
list_vm_response = list_virtual_machines(
|
||||
self.apiclient,
|
||||
id=self.small_virtual_machine.id
|
||||
)
|
||||
|
||||
|
||||
if isinstance(list_vm_response, list):
|
||||
|
||||
|
||||
vm = list_vm_response[0]
|
||||
if vm.state == 'Running':
|
||||
self.debug("VM state: %s" % vm.state)
|
||||
break
|
||||
|
||||
if timeout == 0:
|
||||
|
||||
if timeout == 0:
|
||||
raise Exception(
|
||||
"Failed to start VM (ID: %s) after changing service offering" % vm.id)
|
||||
|
||||
|
||||
timeout = timeout - 1
|
||||
|
||||
|
||||
list_vm_response = list_virtual_machines(
|
||||
self.apiclient,
|
||||
id=self.small_virtual_machine.id
|
||||
)
|
||||
|
||||
|
||||
try:
|
||||
ssh_client = self.small_virtual_machine.get_ssh_client()
|
||||
except Exception as e:
|
||||
|
|
@ -616,7 +621,7 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
"SSH Access failed for %s: %s" % \
|
||||
(self.small_virtual_machine.ipaddress, e)
|
||||
)
|
||||
|
||||
|
||||
cpuinfo = ssh_client.execute("cat /proc/cpuinfo")
|
||||
|
||||
cpu_cnt = len([i for i in cpuinfo if "processor" in i])
|
||||
|
|
@ -626,7 +631,7 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
meminfo = ssh_client.execute("cat /proc/meminfo")
|
||||
#MemTotal: 1017464 kB
|
||||
total_mem = [i for i in meminfo if "MemTotal" in i][0].split()[1]
|
||||
|
||||
|
||||
self.debug(
|
||||
"CPU count: %s, CPU Speed: %s, Mem Info: %s" % (
|
||||
cpu_cnt,
|
||||
|
|
@ -644,7 +649,7 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
self.medium_offering.cpuspeed,
|
||||
"Check CPU Speed for medium offering"
|
||||
)
|
||||
|
||||
|
||||
self.assertAlmostEqual(
|
||||
int(total_mem) / 1024, # In MBs
|
||||
self.medium_offering.memory,
|
||||
|
|
@ -660,7 +665,7 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
# 1. Should not be able to login to the VM.
|
||||
# 2. listVM command should return this VM.State
|
||||
# of this VM should be "Destroyed".
|
||||
|
||||
|
||||
self.debug("Destroy VM - ID: %s" % self.small_virtual_machine.id)
|
||||
self.small_virtual_machine.delete(self.apiclient)
|
||||
|
||||
|
|
@ -673,7 +678,7 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
|
||||
self.assertNotEqual(
|
||||
len(list_vm_response),
|
||||
0,
|
||||
|
|
@ -695,9 +700,9 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
# 1. listVM command should return this VM.
|
||||
# State of this VM should be "Stopped".
|
||||
# 2. We should be able to Start this VM successfully.
|
||||
|
||||
|
||||
self.debug("Recovering VM - ID: %s" % self.small_virtual_machine.id)
|
||||
|
||||
|
||||
cmd = recoverVirtualMachine.recoverVirtualMachineCmd()
|
||||
cmd.id = self.small_virtual_machine.id
|
||||
self.apiclient.recoverVirtualMachine(cmd)
|
||||
|
|
@ -711,7 +716,7 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
|
||||
self.assertNotEqual(
|
||||
len(list_vm_response),
|
||||
0,
|
||||
|
|
@ -734,21 +739,21 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
# 2. listVM command should return this VM.State of this VM
|
||||
# should be "Running" and the host should be the host
|
||||
# to which the VM was migrated to
|
||||
|
||||
|
||||
hosts = Host.list(
|
||||
self.apiclient,
|
||||
self.apiclient,
|
||||
zoneid=self.medium_virtual_machine.zoneid,
|
||||
type='Routing'
|
||||
)
|
||||
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(hosts, list),
|
||||
True,
|
||||
isinstance(hosts, list),
|
||||
True,
|
||||
"Check the number of hosts in the zone"
|
||||
)
|
||||
self.assertEqual(
|
||||
len(hosts),
|
||||
2,
|
||||
len(hosts),
|
||||
2,
|
||||
"Atleast 2 hosts should be present in a zone for VM migration"
|
||||
)
|
||||
|
||||
|
|
@ -757,12 +762,12 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
host = hosts[1]
|
||||
else:
|
||||
host = hosts[0]
|
||||
|
||||
|
||||
self.debug("Migrating VM-ID: %s to Host: %s" % (
|
||||
self.medium_virtual_machine.id,
|
||||
host.id
|
||||
))
|
||||
|
||||
|
||||
cmd = migrateVirtualMachine.migrateVirtualMachineCmd()
|
||||
cmd.hostid = host.id
|
||||
cmd.virtualmachineid = self.medium_virtual_machine.id
|
||||
|
|
@ -777,7 +782,7 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
|
||||
self.assertNotEqual(
|
||||
list_vm_response,
|
||||
None,
|
||||
|
|
@ -804,9 +809,9 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
"""
|
||||
# Validate the following
|
||||
# 1. listVM command should NOT return this VM any more.
|
||||
|
||||
|
||||
self.debug("Expunge VM-ID: %s" % self.small_virtual_machine.id)
|
||||
|
||||
|
||||
cmd = destroyVirtualMachine.destroyVirtualMachineCmd()
|
||||
cmd.id = self.small_virtual_machine.id
|
||||
self.apiclient.destroyVirtualMachine(cmd)
|
||||
|
|
@ -848,14 +853,14 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
|
||||
|
||||
self.debug("Successfully created ISO with ID: %s" % iso.id)
|
||||
try:
|
||||
iso.download(self.apiclient)
|
||||
except Exception as e:
|
||||
self.fail("Exception while downloading ISO %s: %s"\
|
||||
% (iso.id, e))
|
||||
|
||||
|
||||
self.debug("Attach ISO with ID: %s to VM ID: %s" % (
|
||||
iso.id,
|
||||
self.virtual_machine.id
|
||||
|
|
@ -865,10 +870,10 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
cmd.id = iso.id
|
||||
cmd.virtualmachineid = self.virtual_machine.id
|
||||
self.apiclient.attachIso(cmd)
|
||||
|
||||
|
||||
try:
|
||||
ssh_client = self.virtual_machine.get_ssh_client()
|
||||
|
||||
|
||||
cmds = [
|
||||
"mkdir -p %s" % self.services["mount_dir"],
|
||||
"mount -rt iso9660 %s %s" \
|
||||
|
|
@ -877,7 +882,7 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
self.services["mount_dir"]
|
||||
),
|
||||
]
|
||||
|
||||
|
||||
for c in cmds:
|
||||
res = ssh_client.execute(c)
|
||||
|
||||
|
|
@ -888,9 +893,9 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
#Disk /dev/xvdd: 4393 MB, 4393723904 bytes
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH failed for virtual machine: %s - %s" %
|
||||
self.fail("SSH failed for virtual machine: %s - %s" %
|
||||
(self.virtual_machine.ipaddress, e))
|
||||
|
||||
|
||||
# Res may contain more than one strings depending on environment
|
||||
# Split strings to form new list which is used for assertion on ISO size
|
||||
result = []
|
||||
|
|
@ -919,23 +924,23 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
#Unmount ISO
|
||||
command = "umount %s" % self.services["mount_dir"]
|
||||
ssh_client.execute(command)
|
||||
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH failed for virtual machine: %s - %s" %
|
||||
self.fail("SSH failed for virtual machine: %s - %s" %
|
||||
(self.virtual_machine.ipaddress, e))
|
||||
|
||||
|
||||
#Detach from VM
|
||||
cmd = detachIso.detachIsoCmd()
|
||||
cmd.virtualmachineid = self.virtual_machine.id
|
||||
self.apiclient.detachIso(cmd)
|
||||
|
||||
|
||||
try:
|
||||
res = ssh_client.execute(c)
|
||||
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH failed for virtual machine: %s - %s" %
|
||||
self.fail("SSH failed for virtual machine: %s - %s" %
|
||||
(self.virtual_machine.ipaddress, e))
|
||||
|
||||
|
||||
# Check if ISO is properly detached from VM (using fdisk)
|
||||
result = self.services["diskdevice"] in str(res)
|
||||
|
||||
|
|
@ -945,3 +950,174 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
"Check if ISO is detached from virtual machine"
|
||||
)
|
||||
return
|
||||
|
||||
@unittest.skip("Additional test")
|
||||
class TestVMPasswordEnabled(cloudstackTestCase):
|
||||
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
cls.api_client = super(
|
||||
TestVMPasswordEnabled,
|
||||
cls
|
||||
        ).getClsTestClient().getApiClient()
        cls.services = Services().services

        # Get Zone, Domain and templates
        domain = get_domain(cls.api_client, cls.services)
        zone = get_zone(cls.api_client, cls.services)
        template = get_template(
            cls.api_client,
            zone.id,
            cls.services["ostypeid"]
        )
        # Set Zones and disk offerings
        cls.services["small"]["zoneid"] = zone.id
        cls.services["small"]["template"] = template.id

        # Create VMs, NAT Rules etc
        cls.account = Account.create(
            cls.api_client,
            cls.services["account"],
            domainid=domain.id
        )

        cls.small_offering = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offerings"]["small"]
        )

        cls.virtual_machine = VirtualMachine.create(
            cls.api_client,
            cls.services["small"],
            accountid=cls.account.account.name,
            domainid=cls.account.account.domainid,
            serviceofferingid=cls.small_offering.id,
            mode=cls.services["mode"]
        )
        # Stop virtual machine
        cls.virtual_machine.stop(cls.api_client)

        # Poll listVM to ensure VM is stopped properly
        timeout = cls.services["timeout"]
        while True:
            time.sleep(cls.services["sleep"])

            # Ensure that VM is in stopped state
            list_vm_response = list_virtual_machines(
                cls.api_client,
                id=cls.virtual_machine.id
            )

            if isinstance(list_vm_response, list):
                vm = list_vm_response[0]
                if vm.state == 'Stopped':
                    break

            if timeout == 0:
                raise Exception(
                    "Failed to stop VM (ID: %s) in change service offering" %
                    vm.id)

            timeout = timeout - 1

        list_volume = list_volumes(
            cls.api_client,
            virtualmachineid=cls.virtual_machine.id,
            type='ROOT',
            listall=True
        )
        if isinstance(list_volume, list):
            cls.volume = list_volume[0]
        else:
            raise Exception(
                "Exception: Unable to find root volume for VM: %s" %
                cls.virtual_machine.id)

        cls.services["template"]["ostypeid"] = cls.services["ostypeid"]
        # Create templates for Edit, Delete & update permissions testcases
        cls.pw_enabled_template = Template.create(
            cls.api_client,
            cls.services["template"],
            cls.volume.id,
            account=cls.account.account.name,
            domainid=cls.account.account.domainid
        )
        # Delete the VM - no longer needed
        cls.virtual_machine.delete(cls.api_client)
        cls.services["small"]["template"] = cls.pw_enabled_template.id

        cls.vm = VirtualMachine.create(
            cls.api_client,
            cls.services["small"],
            accountid=cls.account.account.name,
            domainid=cls.account.account.domainid,
            serviceofferingid=cls.small_offering.id,
            mode=cls.services["mode"]
        )
        cls._cleanup = [
            cls.small_offering,
            cls.pw_enabled_template,
            cls.account
        ]

    @classmethod
    def tearDownClass(cls):
        # Cleanup VMs, templates etc.
        cleanup_resources(cls.api_client, cls._cleanup)
        return

    def setUp(self):
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []

    def tearDown(self):
        # Clean up, terminate the created instances
        cleanup_resources(self.apiclient, self.cleanup)
        return

    def test_11_get_vm_password(self):
        """Test get VM password for password enabled template"""

        # Validate the following
        # 1. Create an account
        # 2. Deploy VM with default service offering and "password enabled"
        #    template. VM should be in running state.
        # 3. Stop VM deployed in step 2
        # 4. Reset VM password. SSH with new password should be successful

        self.debug("Stopping VM: %s" % self.vm.name)
        self.vm.stop(self.apiclient)

        # Sleep to ensure VM is stopped properly
        time.sleep(self.services["sleep"])

        self.debug("Resetting VM password for VM: %s" % self.vm.name)
        password = self.vm.resetPassword(self.apiclient)
        self.debug("Password reset to: %s" % password)

        self.debug("Starting VM to verify new password..")
        self.vm.start(self.apiclient)
        self.debug("VM - %s started!" % self.vm.name)

        vms = VirtualMachine.list(self.apiclient, id=self.vm.id, listall=True)
        self.assertEqual(
            isinstance(vms, list),
            True,
            "List VMs should return valid response for VM: %s" % self.vm.name
        )
        virtual_machine = vms[0]

        self.assertEqual(
            virtual_machine.state,
            "Running",
            "VM state should be running"
        )
        try:
            self.debug("SSHing into VM: %s" % self.vm.ssh_ip)
            self.vm.password = password
            ssh = self.vm.get_ssh_client()
        except Exception as e:
            self.fail("SSH into VM: %s failed" % self.vm.ssh_ip)
        return
@@ -17,10 +17,10 @@
import marvin
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.remoteSSHClient import remoteSSHClient
from integration.lib.utils import *
from integration.lib.base import *
from integration.lib.common import *
from marvin.remoteSSHClient import remoteSSHClient
#Import System modules
import os
import urllib

@@ -70,7 +70,7 @@ class Services:
            "publicport": 22,
            "protocol": 'TCP',
            "diskdevice": "/dev/xvdb",
            "ostypeid": '5776c0d2-f331-42db-ba3a-29f1f8319bc9',
            "ostypeid": '946b031b-0e10-4f4a-a3fc-d212ae2ea07f',
            "mode": 'advanced',
            "sleep": 60,
            "timeout": 10,

@@ -514,4 +514,4 @@ class TestVolumes(cloudstackTestCase):
            None,
            "Check if volume exists in ListVolumes"
        )
        return
        return
@@ -557,6 +557,11 @@ body.login {
  height: 540px;
}

.install-wizard .step.intro.what-is-cloudplatform p {
  background: url(../images/bg-what-is-cloudplatform.png) no-repeat 50% 237px;
  height: 540px;
}

/*** Diagram*/
.install-wizard .diagram {
  width: 910px;

@@ -7989,7 +7994,7 @@ div.panel.ui-dialog div.list-view div.fixed-header {

.project-selector .button.cancel {
  color: #808080;
  background: url("../images/gradients.png") repeat scroll 0 -480px #B6B6B6;
  background: #B6B6B6 url("../images/gradients.png") repeat 0 -480px;
  border: 1px solid #AAAAAA;
  border-radius: 4px 4px 4px 4px;
  font-size: 13px;

@@ -7999,19 +8004,18 @@ div.panel.ui-dialog div.list-view div.fixed-header {
  color: #838181;
  /*+placement:shift 488px 9px;*/
  position: relative;
  left: 488px;
  top: 9px;
  left: 170px;
  top: -8px;
  margin: 19px 0 0 0px;
  width: 54px;
}

.project-selector .button.cancel:hover {
  color: #3A3A3A;
}

/*** Resource management*/
.project-dashboard .resources {
}
|
|||
Binary file not shown.
|
After Width: | Height: | Size: 32 KiB |
Binary file not shown.
|
Before Width: | Height: | Size: 32 KiB After Width: | Height: | Size: 32 KiB |
@@ -382,11 +382,11 @@
  url: 'eula.' + g_lang + '.html',
  dataType: 'html',
  success: function(html) {
    $('title').html('CloudPlatform');
    document.title = 'CloudPlatform';
    cloudStack.uiCustom.login($.extend(loginArgs, { eula: html, hasLogo: true }));
  },
  error: function() {
    $('title').html('CloudStack');
    document.title = 'CloudStack';
    cloudStack.uiCustom.login(loginArgs);
  },
  beforeSend : function(XMLHttpResponse) {
@@ -1079,27 +1079,17 @@
    //hide/show service fields upon guestIpType(Shared/Isolated) and zoneType(Advanced/Basic) ***** (begin) *****
    var serviceFieldsToHide = [];
    if($guestTypeField.val() == 'Shared') { //Shared network offering
      if (hasAdvancedZones) { //advanced zone
        serviceFieldsToHide = [
          'service.SourceNat.isEnabled',
          'service.StaticNat.isEnabled',
          'service.PortForwarding.isEnabled',
          'service.Lb.isEnabled',
          'service.Firewall.isEnabled',
          'service.Vpn.isEnabled'
        ];
      }
      else { //basic zone
        serviceFieldsToHide = [
          'service.SourceNat.isEnabled',
          'service.PortForwarding.isEnabled',
          'service.Firewall.isEnabled',
          'service.Vpn.isEnabled'
        ];
      }
      serviceFieldsToHide = [
        'service.SourceNat.isEnabled',
        'service.PortForwarding.isEnabled',
        'service.Firewall.isEnabled',
        'service.Vpn.isEnabled'
      ];
    }
    else { //Isolated network offering (which supports all services)
      serviceFieldsToHide = [];
    else { //Isolated network offering
      serviceFieldsToHide = [
        'service.SecurityGroup.isEnabled'
      ];
    }

    //hide service fields that are included in serviceFieldsToHide
@@ -23,6 +23,7 @@
  section: 'instances',
  filters: {
    all: { label: 'ui.listView.filters.all' },
    mine: { label: 'ui.listView.filters.mine' },
    running: { label: 'state.Running' },
    stopped: { label: 'state.Stopped' },
    destroyed: {
@@ -34,7 +34,7 @@
  name: { label: 'label.name' },
  type: { label: 'label.type' },
  hypervisor: { label: 'label.hypervisor' },
  vmdisplayname: { label: 'label.vm.display.name' },
  vmdisplayname: { label: 'label.vm.display.name' }

  /*
  state: {
@@ -207,6 +207,7 @@
  label: 'label.format',
  select: function(args) {
    var items = [];
    items.push({ id: 'RAW', description: 'RAW' });
    items.push({ id: 'VHD', description: 'VHD' });
    items.push({ id: 'OVA', description: 'OVA' });
    items.push({ id: 'QCOW2', description: 'QCOW2' });
@@ -6079,6 +6079,8 @@
  hypervisor: {
    label: 'label.hypervisor',
    select: function(args) {
      var vSwitchEnabled = false;

      $.ajax({
        url: createURL("listHypervisors"),
        dataType: "json",
@@ -6093,31 +6095,47 @@
        }
      });

      // Check whether vSwitch capability is enabled
      $.ajax({
        url: createURL('listConfigurations'),
        data: {
          name: 'vmware.use.nexus.vswitch'
        },
        async: false,
        success: function(json) {
          if (json.listconfigurationsresponse.configuration[0].value == 'true') {
            vSwitchEnabled = true;
          }
        }
      });

      args.$select.bind("change", function(event) {
        var $form = $(this).closest('form');
        if($(this).val() == "VMware") {
        var $vsmFields = $.merge(
          $form.find('.form-item[rel=vsmipaddress]'),
          $form.find('.form-item[rel=vsmusername]'),
          $form.find('.form-item[rel=vsmpassword]')
        );

        if ($(this).val() == "VMware") {
          //$('li[input_sub_group="external"]', $dialogAddCluster).show();
          $form.find('.form-item[rel=vCenterHost]').css('display', 'inline-block');
          $form.find('.form-item[rel=vCenterUsername]').css('display', 'inline-block');
          $form.find('.form-item[rel=vCenterPassword]').css('display', 'inline-block');
          $form.find('.form-item[rel=vCenterDatacenter]').css('display', 'inline-block');
          $form.find('.form-item[rel=enableNexusVswitch]').css('display', 'inline-block');

          //$("#cluster_name_label", $dialogAddCluster).text("vCenter Cluster:");
        }
        else {
          //$('li[input_group="vmware"]', $dialogAddCluster).hide();
          if (vSwitchEnabled) {
            $vsmFields.css('display', 'inline-block');
          } else {
            $vsmFields.css('display', 'none');
          }
        } else {
          $form.find('.form-item[rel=vCenterHost]').css('display', 'none');
          $form.find('.form-item[rel=vCenterUsername]').css('display', 'none');
          $form.find('.form-item[rel=vCenterPassword]').css('display', 'none');
          $form.find('.form-item[rel=vCenterDatacenter]').css('display', 'none');
          $form.find('.form-item[rel=enableNexusVswitch]').css('display', 'none');
          $('.form-item[rel=enableNexusVswitch] input').attr('checked', false);
          $form.find('.form-item[rel=nexusVswitchIpAddress]').css('display', 'none');
          $form.find('.form-item[rel=nexusVswitchUsername]').css('display', 'none');
          $form.find('.form-item[rel=nexusVswitchPassword]').css('display', 'none');

          //$("#cluster_name_label", $dialogAddCluster).text("Cluster:");
          $vsmFields.css('display', 'none');
        }
      });
    }
@@ -6167,25 +6185,18 @@
  label: 'label.vcenter.datacenter',
  validation: { required: true }
},
enableNexusVswitch: {
  label: 'Add Nexus vSwitch',
  isBoolean: true
},
vsmipaddress: {
  label: 'vSwitch IP Address',
  dependsOn: 'enableNexusVswitch',
  validation: { required: true },
  isHidden: true
},
vsmusername: {
  label: 'vSwitch Username',
  dependsOn: 'enableNexusVswitch',
  validation: { required: true },
  isHidden: true
},
vsmpassword: {
  label: 'vSwitch Password',
  dependsOn: 'enableNexusVswitch',
  validation: { required: true },
  isPassword: true,
  isHidden: true
@@ -20,6 +20,18 @@
  var launchStart; // Holds last launch callback, in case of error
  var $launchState;

  // Check whether titles should read 'CloudStack' or 'CloudPlatform' and replace occurrences accordingly
  var checkTitle = function(str) {
    // Get the flag that indicates if a EULA is present
    if (eulaHTML && eulaHTML.length) { return str.replace(/CloudStack/ig, 'CloudPlatform'); }
    else { return str; }
  }

  /* var data = $("p");
     $(data).each(function() {
       $(this).html(checkTitle($(this).html()));
     });
  */
  /**
   * Successful installation action
   */

@@ -39,7 +51,7 @@
  cloudStack.installWizard.copy[id]({
    response: {
      success: function(args) {
        $elem.append(_l(args.text));
        $elem.append(checkTitle(_l(args.text)));
      }
    }
  });

@@ -87,9 +99,9 @@
  var $intro = $('<div></div>').addClass('intro');
  var $title = $('<div></div>').addClass('title')
    .html(title);
    .html(checkTitle(title));
  var $subtitle = $('<div></div>').addClass('subtitle')
    .html(subtitle);
    .html(checkTitle(subtitle));
  var $copy = getCopy(copyID, $('<p></p>'));
  var $prev = elems.prevButton(_l('label.back'));
  var $continue = elems.nextButton('OK');

@@ -202,8 +214,8 @@
  tooltip: function(title, content) {
    return $('<div>').addClass('tooltip-info').append(
      $('<div>').addClass('arrow'),
      $('<div>').addClass('title').html(_l(title)),
      $('<div>').addClass('content').append($('<p>').html(_l(content)))
      $('<div>').addClass('title').html(checkTitle(_l(title))),
      $('<div>').addClass('content').append($('<p>').html(checkTitle(_l(content))))
    );
  },

@@ -214,8 +226,8 @@
  return $('<div></div>').addClass('header')
    .append(
      $.merge(
        $('<h2></h2>').html(_l('label.installWizard.title')),
        $('<h3></h3>').html(_l('label.installWizard.subtitle'))
        $('<h2></h2>').html(checkTitle(_l('label.installWizard.title'))),
        $('<h3></h3>').html(checkTitle(_l('label.installWizard.subtitle')))
      )
    );
  },

@@ -297,8 +309,8 @@
  var steps = {
    eula: function(args) {
      var $intro = $('<div></div>').addClass('intro eula');
      var $title = $('<div></div>').addClass('title').html(_l('label.license.agreement'));
      var $subtitle = $('<div></div>').addClass('subtitle').html(_l('label.license.agreement.subtitle'));
      var $title = $('<div></div>').addClass('title').html(checkTitle(_l('label.license.agreement')));
      var $subtitle = $('<div></div>').addClass('subtitle').html(checkTitle(_l('label.license.agreement.subtitle')));
      var $copy = $('<div></div>').addClass('eula-copy').html(eulaHTML);
      var $continue = elems.nextButton(_l('label.agree'));

@@ -314,12 +326,17 @@
    },

    intro: function(args) {
      var $intro = $('<div></div>').addClass('intro what-is-cloudstack');
      var $title = $('<div></div>').addClass('title').html(_l('label.what.is.cloudstack'));
      var $subtitle = $('<div></div>').addClass('subtitle').html(_l('label.introduction.to.cloudstack'));
      if (eulaHTML && eulaHTML.length) {
        var $intro = $('<div></div>').addClass('intro what-is-cloudplatform'); }
      else {
        var $intro = $('<div></div>').addClass('intro what-is-cloudstack');
      }
      var $title = $('<div></div>').addClass('title').html(checkTitle(_l('label.what.is.cloudstack')));
      var $subtitle = $('<div></div>').addClass('subtitle').html(checkTitle(_l('label.introduction.to.cloudstack')));
      var $copy = getCopy('whatIsCloudStack', $('<p></p>'));
      var $continue = elems.nextButton(_l('label.continue.basic.install'));
      var $advanced = elems.nextButton(_l('label.skip.guide')).addClass('advanced-installation');
      var $advanced = elems.nextButton(checkTitle(_l('label.skip.guide'))).addClass('advanced-installation');

      $continue.click(function() {
        goTo('changeUser');

@@ -792,6 +809,7 @@

  var initialStep = eulaHTML ?
    steps.eula().addClass('step') : steps.intro().addClass('step');

  showDiagram('');
  $('html body').addClass('install-wizard');

@@ -801,7 +819,9 @@
    elems.body().append(initialStep),
    $diagramParts
  ).appendTo($container);
  };

  };

  cloudStack.uiCustom.installWizard = installWizard;
}(jQuery, cloudStack));
@@ -125,7 +125,7 @@ public class NetconfHelper {

    public void addPolicyMap(String name, int averageRate, int maxRate, int burstRate)
            throws CloudRuntimeException {
        String command = VsmCommand.getPolicyMap(name, averageRate, maxRate, burstRate);
        String command = VsmCommand.getAddPolicyMap(name, averageRate, maxRate, burstRate);
        if (command != null) {
            command = command.concat(SSH_NETCONF_TERMINATOR);
            send(command);

@@ -180,18 +180,39 @@ public class NetconfHelper {
        }
    }

    public void getPortProfileByName(String name) throws CloudRuntimeException {
    public PortProfile getPortProfileByName(String name) throws CloudRuntimeException {
        String command = VsmCommand.getPortProfile(name);
        if (command != null) {
            command = command.concat(SSH_NETCONF_TERMINATOR);
            send(command);
            // parse the rpc reply.
            VsmPortProfileResponse response = new VsmPortProfileResponse(receive().trim());
            String received = receive();
            VsmPortProfileResponse response = new VsmPortProfileResponse(received.trim());
            if (!response.isResponseOk()) {
                throw new CloudRuntimeException("Error response while getting the port profile details.");
            } else {
                return response.getPortProfile();
            }
        } else {
            throw new CloudRuntimeException("Error generating rpc request for removing policy map.");
            throw new CloudRuntimeException("Error generating rpc request for getting port profile.");
        }
    }

    public PolicyMap getPolicyMapByName(String name) throws CloudRuntimeException {
        String command = VsmCommand.getPolicyMap(name);
        if (command != null) {
            command = command.concat(SSH_NETCONF_TERMINATOR);
            send(command);
            // parse the rpc reply.
            String received = receive();
            VsmPolicyMapResponse response = new VsmPolicyMapResponse(received.trim());
            if (!response.isResponseOk()) {
                throw new CloudRuntimeException("Error response while getting the policy map details.");
            } else {
                return response.getPolicyMap();
            }
        } else {
            throw new CloudRuntimeException("Error generating rpc request for getting policy map.");
        }
    }
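For context, a minimal usage sketch of the two read methods added to NetconfHelper above. The constructor arguments and the example profile name are assumptions for illustration; only getPortProfileByName and getPolicyMapByName come from this change.

package com.cloud.utils.cisco.n1kv.vsm;

import com.cloud.utils.exception.CloudRuntimeException;

// Hypothetical usage sketch, not part of this commit.
public class VsmReadExample {
    public static void main(String[] args) {
        // Assumed constructor signature (ip, username, password); adjust to the actual NetconfHelper API.
        NetconfHelper helper = new NetconfHelper("10.0.0.5", "admin", "secret");
        try {
            PortProfile profile = helper.getPortProfileByName("cloud-veth-profile");
            if (profile.inputPolicyMap != null) {
                PolicyMap map = helper.getPolicyMapByName(profile.inputPolicyMap);
                System.out.println("cir=" + map.committedRate + " bc=" + map.burstRate + " pir=" + map.peakRate);
            }
        } catch (CloudRuntimeException e) {
            // Thrown when the rpc request cannot be built or the VSM returns an error response.
            System.err.println("VSM query failed: " + e.getMessage());
        }
    }
}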
@@ -0,0 +1,15 @@
package com.cloud.utils.cisco.n1kv.vsm;

public class PolicyMap {
    public String policyMapName;
    public int committedRate;
    public int burstRate;
    public int peakRate;

    PolicyMap() {
        policyMapName = null;
        committedRate = 0;
        burstRate = 0;
        peakRate = 0;
    }
}
@@ -0,0 +1,29 @@
package com.cloud.utils.cisco.n1kv.vsm;

import com.cloud.utils.cisco.n1kv.vsm.VsmCommand.BindingType;
import com.cloud.utils.cisco.n1kv.vsm.VsmCommand.PortProfileType;
import com.cloud.utils.cisco.n1kv.vsm.VsmCommand.SwitchPortMode;

public class PortProfile {
    public PortProfileType type;
    public SwitchPortMode mode;
    public BindingType binding;
    public String profileName;
    public String inputPolicyMap;
    public String outputPolicyMap;
    public String vlan;
    public boolean status;
    public int maxPorts;

    PortProfile() {
        profileName = null;
        inputPolicyMap = null;
        outputPolicyMap = null;
        vlan = null;
        status = false;
        maxPorts = 32;
        type = PortProfileType.none;
        mode = SwitchPortMode.none;
        binding = BindingType.none;
    }
}
@@ -79,10 +79,10 @@ public class VsmCommand {

            return serialize(domImpl, doc);
        } catch (ParserConfigurationException e) {
            s_logger.error("Error while creating delete message : " + e.getMessage());
            s_logger.error("Error while creating add port profile message : " + e.getMessage());
            return null;
        } catch (DOMException e) {
            s_logger.error("Error while creating delete message : " + e.getMessage());
            s_logger.error("Error while creating add port profile message : " + e.getMessage());
            return null;
        }
    }

@@ -113,10 +113,10 @@ public class VsmCommand {

            return serialize(domImpl, doc);
        } catch (ParserConfigurationException e) {
            s_logger.error("Error while creating update message : " + e.getMessage());
            s_logger.error("Error while creating update port profile message : " + e.getMessage());
            return null;
        } catch (DOMException e) {
            s_logger.error("Error while creating update message : " + e.getMessage());
            s_logger.error("Error while creating update port profile message : " + e.getMessage());
            return null;
        }
    }

@@ -146,15 +146,15 @@ public class VsmCommand {

            return serialize(domImpl, doc);
        } catch (ParserConfigurationException e) {
            s_logger.error("Error while creating delete message : " + e.getMessage());
            s_logger.error("Error while creating delete port profile message : " + e.getMessage());
            return null;
        } catch (DOMException e) {
            s_logger.error("Error while creating delete message : " + e.getMessage());
            s_logger.error("Error while creating delete port profile message : " + e.getMessage());
            return null;
        }
    }

    public static String getPolicyMap(String name, int averageRate, int maxRate, int burstRate) {
    public static String getAddPolicyMap(String name, int averageRate, int maxRate, int burstRate) {
        try {
            // Create the document and root element.
            DocumentBuilderFactory docFactory = DocumentBuilderFactory.newInstance();

@@ -179,10 +179,10 @@ public class VsmCommand {

            return serialize(domImpl, doc);
        } catch (ParserConfigurationException e) {
            s_logger.error("Error while creating delete message : " + e.getMessage());
            s_logger.error("Error while creating policy map message : " + e.getMessage());
            return null;
        } catch (DOMException e) {
            s_logger.error("Error while creating delete message : " + e.getMessage());
            s_logger.error("Error while creating policy map message : " + e.getMessage());
            return null;
        }
    }

@@ -212,10 +212,10 @@ public class VsmCommand {

            return serialize(domImpl, doc);
        } catch (ParserConfigurationException e) {
            s_logger.error("Error while creating delete message : " + e.getMessage());
            s_logger.error("Error while creating delete policy map message : " + e.getMessage());
            return null;
        } catch (DOMException e) {
            s_logger.error("Error while creating delete message : " + e.getMessage());
            s_logger.error("Error while creating delete policy map message : " + e.getMessage());
            return null;
        }
    }

@@ -245,10 +245,10 @@ public class VsmCommand {

            return serialize(domImpl, doc);
        } catch (ParserConfigurationException e) {
            s_logger.error("Error while creating delete message : " + e.getMessage());
            s_logger.error("Error while creating attach/detach service policy message : " + e.getMessage());
            return null;
        } catch (DOMException e) {
            s_logger.error("Error while creating delete message : " + e.getMessage());
            s_logger.error("Error while creating attach/detach service policy message : " + e.getMessage());
            return null;
        }
    }

@@ -282,10 +282,43 @@ public class VsmCommand {

            return serialize(domImpl, doc);
        } catch (ParserConfigurationException e) {
            s_logger.error("Error while creating delete message : " + e.getMessage());
            s_logger.error("Error while creating the message to get port profile details: " + e.getMessage());
            return null;
        } catch (DOMException e) {
            s_logger.error("Error while creating delete message : " + e.getMessage());
            s_logger.error("Error while creating the message to get port profile details: " + e.getMessage());
            return null;
        }
    }

    public static String getPolicyMap(String name) {
        try {
            DocumentBuilderFactory docFactory = DocumentBuilderFactory.newInstance();
            DocumentBuilder docBuilder = docFactory.newDocumentBuilder();
            DOMImplementation domImpl = docBuilder.getDOMImplementation();
            Document doc = createDocument(domImpl);

            Element get = doc.createElement("nf:get");
            doc.getDocumentElement().appendChild(get);

            Element filter = doc.createElement("nf:filter");
            filter.setAttribute("type", "subtree");
            get.appendChild(filter);

            // Create the show policy-map name <policy-map-name> command.
            Element show = doc.createElement("show");
            filter.appendChild(show);
            Element policyMap = doc.createElement("policy-map");
            show.appendChild(policyMap);
            Element nameNode = doc.createElement("name");
            nameNode.setTextContent(name);
            policyMap.appendChild(nameNode);

            return serialize(domImpl, doc);
        } catch (ParserConfigurationException e) {
            s_logger.error("Error while creating the message to get policy map details : " + e.getMessage());
            return null;
        } catch (DOMException e) {
            s_logger.error("Error while creating the message to get policy map details : " + e.getMessage());
            return null;
        }
    }
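As a rough illustration of what the new getPolicyMap(String) factory above produces: the call below returns a serialized NETCONF "get" request whose filter subtree contains show/policy-map/name. The outer rpc envelope comes from createDocument(), which is not shown in this diff, so that part of the markup is an assumption.

// Illustrative snippet, not part of this commit.
public class GetPolicyMapRequestExample {
    public static void main(String[] args) {
        String rpc = com.cloud.utils.cisco.n1kv.vsm.VsmCommand.getPolicyMap("pmap-cloud");
        if (rpc != null) {
            // Expected filter subtree (outer envelope assumed):
            // <nf:get><nf:filter type="subtree"><show><policy-map><name>pmap-cloud</name></policy-map></show></nf:filter></nf:get>
            System.out.println(rpc);
        }
    }
}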
@@ -312,7 +345,7 @@ public class VsmCommand {
            s_logger.error("Error while creating hello message : " + e.getMessage());
            return null;
        } catch (DOMException e) {
            s_logger.error("Error while creating delete message : " + e.getMessage());
            s_logger.error("Error while creating hello message : " + e.getMessage());
            return null;
        }
    }
@@ -7,6 +7,7 @@ public class VsmOkResponse extends VsmResponse {

    VsmOkResponse(String response) {
        super(response);
        initialize();
    }

    protected void parse(Element root) {
@@ -0,0 +1,66 @@
package com.cloud.utils.cisco.n1kv.vsm;

import org.apache.log4j.Logger;

import org.w3c.dom.DOMException;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;

public class VsmPolicyMapResponse extends VsmResponse {
    private static final Logger s_logger = Logger.getLogger(VsmPolicyMapResponse.class);
    private static final String s_policyMapDetails = "__XML__OPT_Cmd_show_policy-map___readonly__";

    private PolicyMap _policyMap = new PolicyMap();

    VsmPolicyMapResponse(String response) {
        super(response);
        initialize();
    }

    public PolicyMap getPolicyMap() {
        return _policyMap;
    }

    protected void parse(Element root) {
        NodeList list = root.getElementsByTagName("nf:rpc-error");
        if (list.getLength() == 0) {
            // No rpc-error tag; means response was ok.
            NodeList dataList = root.getElementsByTagName("nf:data");
            if (dataList.getLength() > 0) {
                parseData(dataList.item(0));
                _responseOk = true;
            }
        } else {
            super.parseError(list.item(0));
            _responseOk = false;
        }
    }

    protected void parseData(Node data) {
        try {
            NodeList list = ((Element)data).getElementsByTagName(s_policyMapDetails);
            if (list.getLength() > 0) {
                NodeList readOnlyList = ((Element)list.item(0)).getElementsByTagName("__readonly__");
                Element readOnly = (Element)readOnlyList.item(0);

                for (Node node = readOnly.getFirstChild();
                        node != null; node = node.getNextSibling()) {
                    String currentNode = node.getNodeName();
                    String value = node.getTextContent();
                    if ("pmap-name-out".equalsIgnoreCase(currentNode)) {
                        _policyMap.policyMapName = value;
                    } else if ("cir".equalsIgnoreCase(currentNode)) {
                        _policyMap.committedRate = Integer.parseInt(value.trim());
                    } else if ("bc".equalsIgnoreCase(currentNode)) {
                        _policyMap.burstRate = Integer.parseInt(value.trim());
                    } else if ("pir".equalsIgnoreCase(currentNode)) {
                        _policyMap.peakRate = Integer.parseInt(value.trim());
                    }
                }
            }
        } catch (DOMException e) {
            s_logger.error("Error parsing the response : " + e.toString());
        }
    }
}
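To make the tag-to-field mapping in VsmPolicyMapResponse concrete, here is a small same-package sketch that feeds the parser a hand-written reply. The XML only approximates the shape of a real Nexus 1000V rpc-reply; the element names match what parseData() looks for, but the framing is an assumption.

package com.cloud.utils.cisco.n1kv.vsm;

// Illustration only; the constructor is package-private, so this sketch lives in the same package.
public class PolicyMapResponseExample {
    public static void main(String[] args) {
        // Hand-written reply approximating the expected shape; not captured from a real device.
        String reply =
              "<nf:rpc-reply xmlns:nf=\"urn:ietf:params:xml:ns:netconf:base:1.0\">"
            + "<nf:data><__XML__OPT_Cmd_show_policy-map___readonly__><__readonly__>"
            + "<pmap-name-out>pmap-cloud</pmap-name-out><cir>2000</cir><bc>500</bc><pir>3000</pir>"
            + "</__readonly__></__XML__OPT_Cmd_show_policy-map___readonly__></nf:data></nf:rpc-reply>";
        VsmPolicyMapResponse response = new VsmPolicyMapResponse(reply);
        if (response.isResponseOk()) {
            PolicyMap map = response.getPolicyMap();
            // Expected: pmap-cloud, committedRate=2000, burstRate=500, peakRate=3000
            System.out.println(map.policyMapName + " cir=" + map.committedRate);
        }
    }
}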
@@ -1,22 +1,145 @@
package com.cloud.utils.cisco.n1kv.vsm;

import org.apache.log4j.Logger;

import org.w3c.dom.DOMException;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import java.util.StringTokenizer;

import com.cloud.utils.cisco.n1kv.vsm.VsmCommand.*;

public class VsmPortProfileResponse extends VsmResponse {
    private static final Logger s_logger = Logger.getLogger(VsmPortProfileResponse.class);
    private static final String s_portProfileDetails = "__XML__OPT_Cmd_show_port_profile___readonly__";

    private PortProfile _portProfile = new PortProfile();

    VsmPortProfileResponse(String response) {
        super(response);
        initialize();
    }

    public PortProfile getPortProfile() {
        return _portProfile;
    }

    protected void parse(Element root) {
        NodeList list = root.getElementsByTagName("nf:rpc-error");
        if (list.getLength() == 0) {
            // No rpc-error tag; means response was ok.
            assert(root.getElementsByTagName("nf:ok").getLength() > 0);
            _responseOk = true;
            NodeList dataList = root.getElementsByTagName("nf:data");
            if (dataList.getLength() > 0) {
                parseData(dataList.item(0));
                _responseOk = true;
            }
        } else {
            super.parseError(list.item(0));
            _responseOk = false;
        }
    }

    protected void parseData(Node data) {
        try {
            NodeList list = ((Element)data).getElementsByTagName(s_portProfileDetails);
            if (list.getLength() > 0) {
                NodeList readOnlyList = ((Element)list.item(0)).getElementsByTagName("__readonly__");
                Element readOnly = (Element)readOnlyList.item(0);

                for (Node node = readOnly.getFirstChild();
                        node != null; node = node.getNextSibling()) {
                    String currentNode = node.getNodeName();
                    String value = node.getTextContent();
                    if ("port_binding".equalsIgnoreCase(currentNode)) {
                        setPortBinding(value);
                    } else if ("profile_name".equalsIgnoreCase(currentNode)) {
                        // Set the port profile name.
                        _portProfile.profileName = value;
                    } else if ("profile_cfg".equalsIgnoreCase(currentNode)) {
                        setProfileConfiguration(value);
                    } else if ("type".equalsIgnoreCase(currentNode)) {
                        setPortType(value);
                    } else if ("status".equalsIgnoreCase(currentNode)) {
                        // Has the profile been enabled.
                        if (value.equalsIgnoreCase("1")) {
                            _portProfile.status = true;
                        }
                    } else if ("max_ports".equalsIgnoreCase(currentNode)) {
                        // Maximum number of ports allowed in the profile.
                        _portProfile.maxPorts = Integer.parseInt(value.trim());
                    }
                }
            }
        } catch (DOMException e) {
            s_logger.error("Error parsing the response : " + e.toString());
        }
    }

    private void setProfileConfiguration(String value) {
        StringTokenizer tokens = new StringTokenizer(value.trim());
        if (tokens.hasMoreTokens()) {
            String currentToken = tokens.nextToken();
            if ("switchport".equalsIgnoreCase(currentToken)) {
                parseProfileMode(tokens);
            } else if ("service-policy".equalsIgnoreCase(currentToken)) {
                String ioType = tokens.nextToken();
                if ("input".equalsIgnoreCase(ioType)) {
                    _portProfile.inputPolicyMap = tokens.nextToken();
                } else if ("output".equalsIgnoreCase(ioType)) {
                    _portProfile.outputPolicyMap = tokens.nextToken();
                }
            }
        }
    }

    private void parseProfileMode(StringTokenizer tokens) {
        if (tokens.hasMoreTokens()) {
            String firstToken = tokens.nextToken();
            if ("mode".equalsIgnoreCase(firstToken)) {
                setPortMode(tokens.nextToken());
            } else if ("access".equalsIgnoreCase(firstToken)) {
                if (tokens.hasMoreTokens()) {
                    String secondToken = tokens.nextToken();
                    assert("vlan".equalsIgnoreCase(secondToken));
                    if (tokens.hasMoreTokens()) {
                        _portProfile.vlan = tokens.nextToken();
                    }
                }
            }
        }
    }

    private void setPortMode(String value) {
        // Set the mode for port profile.
        if ("access".equalsIgnoreCase(value)) {
            _portProfile.mode = SwitchPortMode.access;
        } else if ("trunk".equalsIgnoreCase(value)) {
            _portProfile.mode = SwitchPortMode.trunk;
        } else if ("privatevlanhost".equalsIgnoreCase(value)) {
            _portProfile.mode = SwitchPortMode.privatevlanhost;
        } else if ("privatevlanpromiscuous".equalsIgnoreCase(value)) {
            _portProfile.mode = SwitchPortMode.privatevlanpromiscuous;
        }
    }

    private void setPortBinding(String value) {
        // Set the binding type for the port profile.
        if ("static".equalsIgnoreCase(value)) {
            _portProfile.binding = BindingType.portbindingstatic;
        } else if ("dynamic".equalsIgnoreCase(value)) {
            _portProfile.binding = BindingType.portbindingdynamic;
        } else if ("ephermal".equalsIgnoreCase(value)) {
            _portProfile.binding = BindingType.portbindingephermal;
        }
    }

    private void setPortType(String value) {
        // Set the type field (vethernet/ethernet).
        if ("vethernet".equalsIgnoreCase(value)) {
            _portProfile.type = PortProfileType.vethernet;
        } else if ("ethernet".equalsIgnoreCase(value)) {
            _portProfile.type = PortProfileType.ethernet;
        }
    }
}
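A small illustration of the StringTokenizer logic used by setProfileConfiguration() above. The sample profile_cfg string is an assumption about typical "show port-profile" output, not taken from this diff.

import java.util.StringTokenizer;

// Illustration only, not part of this commit.
public class ProfileCfgTokenExample {
    public static void main(String[] args) {
        String cfg = "service-policy input pmap-cloud";   // assumed sample line
        StringTokenizer tokens = new StringTokenizer(cfg.trim());
        String first = tokens.nextToken();                // "service-policy"
        String ioType = tokens.nextToken();               // "input"
        String policyMap = tokens.nextToken();            // "pmap-cloud" -> would be stored in inputPolicyMap
        System.out.println(first + " " + ioType + " -> " + policyMap);
    }
}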
@@ -8,7 +8,6 @@ import javax.xml.parsers.ParserConfigurationException;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.w3c.dom.DOMException;
import org.w3c.dom.ls.DOMImplementationLS;
import org.w3c.dom.ls.LSSerializer;

@@ -73,7 +72,10 @@ public abstract class VsmResponse {
        _tag = ErrorTag.InUse;
        _type = ErrorType.rpc;
        _severity = ErrorSeverity.error;
        _docResponse = null;
    }

    protected void initialize() {
        try {
            DocumentBuilderFactory docFactory = DocumentBuilderFactory.newInstance();
            docFactory.setNamespaceAware(true);