Merge branch '3.0.x' of ssh://git.cloud.com/var/lib/git/cloudstack-oss into 3.0.x

This commit is contained in:
Sanjay Tripathi 2012-07-31 21:53:26 +05:30
commit b448412148
20 changed files with 507 additions and 81 deletions

View File

@ -39,5 +39,8 @@ public interface HypervisorCapabilities {
*/
Long getMaxGuestsLimit();
/**
* @return the max. data volumes supported by hypervisor
*/
Integer getMaxDataVolumesLimit();
}

View File

@ -26,7 +26,7 @@ file except in compliance with the License. Citrix Systems, Inc. -->
<target name="build-marvin" depends="build-apidocs" description="generates the cloudstackAPIs for marvin">
<echo message="Generating Marvin API Classes"/>
<exec dir="${marvin.src.dir}" executable="python">
<exec dir="${marvin.src.dir}" executable="python" failonerror="true">
<arg value="codegenerator.py" />
<arg value="-s" />
<arg value="${dist.dir}/commands.xml" />
@ -36,7 +36,7 @@ file except in compliance with the License. Citrix Systems, Inc. -->
<target name="package-marvin" depends="build-marvin" description="create a distributable tarball of Marvin">
<echo message="Packaging Marvin"/>
<exec dir="${marvin.dir}" executable="python">
<exec dir="${marvin.dir}" executable="python" failonerror="true">
<arg value="setup.py" />
<arg value="sdist" />
</exec>
@ -45,12 +45,11 @@ file except in compliance with the License. Citrix Systems, Inc. -->
<target name="install-marvin" depends="package-marvin" description="installs marvin on the local machine">
<echo message="Uninstalling Marvin" />
<exec dir="${marvin.dist.dir}" executable="sudo">
<arg line="pip uninstall -y marvin"/>
<exec dir="${marvin.dist.dir}" executable="pip">
<arg line="uninstall -y marvin"/>
</exec>
<echo message="Installing Marvin" />
<exec dir="${marvin.dist.dir}" executable="sudo">
<arg value="pip" />
<exec dir="${marvin.dist.dir}" executable="pip">
<arg value="install" />
<arg value="Marvin-0.1.0.tar.gz" />
</exec>

View File

@ -34,8 +34,9 @@
<property name="testclient.jar" value="cloud-test.jar" />
<property name="simulator.setup.dir" value="${base.dir}/setup/db/" />
<property name="simulator.tests.dir" value="${base.dir}/test/" />
<property name="marvin.sandbox.dir" value="${base.dir}/tools/marvin/marvin/sandbox/" />
<property name="marvin.config" value="${base.dir}/tools/marvin/marvin/sandbox/demo/simulator/simulator.cfg" />
<property name="marvin.config.abspath" location="${marvin.config}" />
<!-- =================== Agent Simulator ==================== -->
<path id="agent-simulator.classpath">
@ -85,14 +86,13 @@
<copy todir="${agent-simulator.dist.dir}/conf">
<fileset dir="${agent.dir}/conf">
<include name="log4j-cloud.xml" />
<include name="simulator.properties" />
</fileset>
</copy>
</target>
<!-- =================== Agent Simulator ==================== -->
<!-- Run Simulator Tests -->
<target name="setup-simulator-tests" depends="clean-all, install-marvin">
<target name="setup-simulator" depends="clean-all">
<mkdir dir="${simulator.setup.dir}/override">
</mkdir>
<copy overwrite="true" file="${simulator.setup.dir}/templates.simulator.sql" tofile="${simulator.setup.dir}/override/templates.sql" />
@ -101,15 +101,15 @@
</mkdir>
<copy overwrite="true" file="${build.dir}/replace.properties" tofile="${build.dir}/override/replace.properties" />
<!-- Replace the COMPONENT-SPEC for components-simulator.xml -->
<copy overwrite="true" file="${simulator.tests.dir}/integration/smoke-simulator/simulator.properties" tofile="${build.dir}/override/replace.properties" />
<copy overwrite="true" file="${build.dir}/simulator.properties" tofile="${build.dir}/override/replace.properties" />
</target>
<target name="run-simulator" depends="setup-simulator-tests, build-all-with-simulator, deploy-server, deploydb-simulator, debug">
<target name="run-simulator" depends="setup-simulator, build-all-with-simulator, deploy-server, deploydb-simulator, debug">
</target>
<target name="run-simulator-tests">
<exec dir="test/" executable="bash">
<arg line="setup.sh -b ${base.dir}/test -t integration/smoke-simulator/ -c integration/smoke-simulator/simulator-smoke.cfg -d localhost -m localhost" />
<target name="run-marvin" depends="install-marvin">
<exec dir="${marvin.sandbox.dir}/" executable="bash">
<arg line="run-marvin.sh -d localhost -m localhost -c ${marvin.config.abspath}" />
</exec>
</target>
<!-- -->

View File

@ -227,7 +227,7 @@
</condition>
<echo message="deploydb ${server-setup.file} ${templates.file} ${DBROOTPW}" />
<exec dir="${db.scripts.dir}" executable="bash">
<exec dir="${db.scripts.dir}" executable="bash" failonerror="true">
<arg value="deploy-db-dev.sh" />
<arg value="${server-setup.file}" />
<arg value="${templates.file}" />
@ -255,7 +255,7 @@
</condition>
<echo message="deploydb ${server-setup.file} ${templates.file} ${DBROOTPW}" />
<exec dir="${db.scripts.dir}" executable="bash">
<exec dir="${db.scripts.dir}" executable="bash" failonerror="true">
<arg value="deploy-db-simulator.sh" />
<arg value="${server-setup.file}" />
<arg value="${templates.file}" />
@ -388,7 +388,7 @@
<property name="commands.vr.file" location="${dist.dir}/client/conf/virtualrouter_commands.properties" />
<echo message="build-apidocs" />
<exec dir="${apidoc.scripts.dir}" executable="bash">
<exec dir="${apidoc.scripts.dir}" executable="bash" failonerror="true">
<arg value="build-apidoc.sh" />
<arg value="${target.dir}/jar" />
<arg value="${deps.dir}" />

View File

@ -0,0 +1,11 @@
DBUSER=cloud
DBPW=cloud
MSLOG=vmops.log
APISERVERLOG=api.log
DBHOST=localhost
DBROOTPW=
AGENTLOGDIR=logs
AGENTLOG=logs/agent.log
MSMNTDIR=/mnt
COMPONENTS-SPEC=components-simulator.xml
AWSAPILOG=awsapi.log

View File

@ -51,6 +51,9 @@ public class HypervisorCapabilitiesVO implements HypervisorCapabilities, Identit
@Column(name="uuid")
private String uuid;
@Column(name="max_data_volumes_limit")
private Integer maxDataVolumesLimit;
protected HypervisorCapabilitiesVO() {
this.uuid = UUID.randomUUID().toString();
}
@ -139,6 +142,15 @@ public class HypervisorCapabilitiesVO implements HypervisorCapabilities, Identit
public void setUuid(String uuid) {
this.uuid = uuid;
}
@Override
public Integer getMaxDataVolumesLimit() {
return maxDataVolumesLimit;
}
public void setMaxDataVolumesLimit(Integer maxDataVolumesLimit) {
this.maxDataVolumesLimit = maxDataVolumesLimit;
}
@Override
public boolean equals(Object obj) {

View File

@ -24,6 +24,7 @@ public interface HypervisorCapabilitiesDao extends GenericDao<HypervisorCapabili
HypervisorCapabilitiesVO findByHypervisorTypeAndVersion(HypervisorType hypervisorType, String hypervisorVersion);
Long getMaxGuestsLimit(HypervisorType hypervisorType, String hypervisorVersion);
Long getMaxGuestsLimit(HypervisorType hypervisorType, String hypervisorVersion);
Integer getMaxDataVolumesLimit(HypervisorType hypervisorType, String hypervisorVersion);
}

View File

@ -32,7 +32,8 @@ public class HypervisorCapabilitiesDaoImpl extends GenericDaoBase<HypervisorCapa
protected final SearchBuilder<HypervisorCapabilitiesVO> HypervisorTypeSearch;
protected final SearchBuilder<HypervisorCapabilitiesVO> HypervisorTypeAndVersionSearch;
protected final GenericSearchBuilder<HypervisorCapabilitiesVO, Long> MaxGuestLimitByHypervisorSearch;
protected final GenericSearchBuilder<HypervisorCapabilitiesVO, Long> MaxGuestLimitByHypervisorSearch;
protected final GenericSearchBuilder<HypervisorCapabilitiesVO, Integer> MaxDataVolumesLimitByHypervisorSearch;
private static final String DEFAULT_VERSION = "default";
@ -50,7 +51,13 @@ public class HypervisorCapabilitiesDaoImpl extends GenericDaoBase<HypervisorCapa
MaxGuestLimitByHypervisorSearch.selectField(MaxGuestLimitByHypervisorSearch.entity().getMaxGuestsLimit());
MaxGuestLimitByHypervisorSearch.and("hypervisorType", MaxGuestLimitByHypervisorSearch.entity().getHypervisorType(), SearchCriteria.Op.EQ);
MaxGuestLimitByHypervisorSearch.and("hypervisorVersion", MaxGuestLimitByHypervisorSearch.entity().getHypervisorVersion(), SearchCriteria.Op.EQ);
MaxGuestLimitByHypervisorSearch.done();
MaxGuestLimitByHypervisorSearch.done();
MaxDataVolumesLimitByHypervisorSearch = createSearchBuilder(Integer.class);
MaxDataVolumesLimitByHypervisorSearch.selectField(MaxDataVolumesLimitByHypervisorSearch.entity().getMaxDataVolumesLimit());
MaxDataVolumesLimitByHypervisorSearch.and("hypervisorType", MaxDataVolumesLimitByHypervisorSearch.entity().getHypervisorType(), SearchCriteria.Op.EQ);
MaxDataVolumesLimitByHypervisorSearch.and("hypervisorVersion", MaxDataVolumesLimitByHypervisorSearch.entity().getHypervisorVersion(), SearchCriteria.Op.EQ);
MaxDataVolumesLimitByHypervisorSearch.done();
}
@Override
@ -99,5 +106,35 @@ public class HypervisorCapabilitiesDaoImpl extends GenericDaoBase<HypervisorCapa
return defaultLimit;
}
return result;
}
@Override
public Integer getMaxDataVolumesLimit(HypervisorType hypervisorType, String hypervisorVersion) {
    // Look up the max data-volume count for the exact hypervisor version first;
    // if the caller gave no version, or no row matches it, fall back to the
    // 'default' version row for that hypervisor type.
    Integer limit = null;
    boolean fallbackToDefault = (hypervisorVersion == null);
    if (!fallbackToDefault) {
        List<Integer> found = searchMaxDataVolumes(hypervisorType, hypervisorVersion);
        if (found.isEmpty()) {
            fallbackToDefault = true;
        } else {
            limit = found.get(0);
        }
    }
    if (fallbackToDefault) {
        List<Integer> found = searchMaxDataVolumes(hypervisorType, DEFAULT_VERSION);
        if (!found.isEmpty()) {
            limit = found.get(0);
        }
    }
    // May be null when neither a version-specific nor a 'default' row exists.
    return limit;
}

// Runs MaxDataVolumesLimitByHypervisorSearch for one (type, version) pair.
private List<Integer> searchMaxDataVolumes(HypervisorType hypervisorType, String version) {
    SearchCriteria<Integer> sc = MaxDataVolumesLimitByHypervisorSearch.create();
    sc.setParameters("hypervisorType", hypervisorType);
    sc.setParameters("hypervisorVersion", version);
    return customSearch(sc, null);
}
}

View File

@ -64,46 +64,59 @@ public class Upgrade302to303 implements DbUpgrade {
encryptConfig(conn);
}
// upgrades deployment with F5 and SRX devices, to 3.0's Network offerings & service providers paradigm
private void setupExternalNetworkDevices(Connection conn) {
PreparedStatement dcSearchStmt, pNetworkStmt, devicesStmt = null;
ResultSet dcResults, pNetworksResults, devicesResult = null;
PreparedStatement zoneSearchStmt = null, pNetworkStmt = null, f5DevicesStmt = null, srxDevicesStmt = null;
ResultSet zoneResults = null, pNetworksResults = null, f5DevicesResult = null, srxDevicesResult = null;
try {
dcSearchStmt = conn.prepareStatement("SELECT id, networktype FROM `cloud`.`data_center`");
dcResults = dcSearchStmt.executeQuery();
while (dcResults.next()) {
long zoneId = dcResults.getLong(1);
long f5HostId = 0;
long srxHostId = 0;
zoneSearchStmt = conn.prepareStatement("SELECT id, networktype FROM `cloud`.`data_center`");
zoneResults = zoneSearchStmt.executeQuery();
while (zoneResults.next()) {
long zoneId = zoneResults.getLong(1);
String networkType = zoneResults.getString(2);
String networkType = dcResults.getString(2);
if (NetworkType.Advanced.toString().equalsIgnoreCase(networkType)) {
if (!NetworkType.Advanced.toString().equalsIgnoreCase(networkType)) {
continue;
}
devicesStmt = conn.prepareStatement("SELECT id, type FROM host WHERE data_center_id=? AND type = 'ExternalLoadBalancer' OR type = 'ExternalFirewall'");
devicesStmt.setLong(1, zoneId);
devicesResult = devicesStmt.executeQuery();
pNetworkStmt = conn.prepareStatement("SELECT id FROM `cloud`.`physical_network` where data_center_id=?");
pNetworkStmt.setLong(1, zoneId);
pNetworksResults = pNetworkStmt.executeQuery();
while (pNetworksResults.next()) {
long physicalNetworkId = pNetworksResults.getLong(1);
PreparedStatement fetchF5NspStmt = conn.prepareStatement("SELECT id from `cloud`.`physical_network_service_providers` where physical_network_id=" + physicalNetworkId
+ " and provider_name = 'F5BigIp'");
ResultSet rsF5NSP = fetchF5NspStmt.executeQuery();
boolean hasF5Nsp = rsF5NSP.next();
fetchF5NspStmt.close();
while (devicesResult.next()) {
String device = devicesResult.getString(2);
if (device.equals("ExternalLoadBalancer")) {
f5HostId = devicesResult.getLong(1);
} else if (device.equals("ExternalFirewall")) {
srxHostId = devicesResult.getLong(1);
}
}
// check if the deployment had F5 and SRX devices
if (f5HostId != 0 && srxHostId != 0) {
pNetworkStmt = conn.prepareStatement("SELECT id FROM `cloud`.`physical_network` where data_center_id=?");
pNetworkStmt.setLong(1, zoneId);
pNetworksResults = pNetworkStmt.executeQuery();
if (pNetworksResults.first()) {
long physicalNetworkId = pNetworksResults.getLong(1);
if (!hasF5Nsp) {
f5DevicesStmt = conn.prepareStatement("SELECT id FROM host WHERE data_center_id=? AND type = 'ExternalLoadBalancer' AND removed IS NULL");
f5DevicesStmt.setLong(1, zoneId);
f5DevicesResult = f5DevicesStmt.executeQuery();
while (f5DevicesResult.next()) {
long f5HostId = f5DevicesResult.getLong(1);;
// add F5BigIP provider and provider instance to physical network
addF5ServiceProvider(conn, physicalNetworkId, zoneId);
addF5LoadBalancer(conn, f5HostId, physicalNetworkId);
}
}
PreparedStatement fetchSRXNspStmt = conn.prepareStatement("SELECT id from `cloud`.`physical_network_service_providers` where physical_network_id=" + physicalNetworkId
+ " and provider_name = 'JuniperSRX'");
ResultSet rsSRXNSP = fetchSRXNspStmt.executeQuery();
boolean hasSrxNsp = rsSRXNSP.next();
fetchSRXNspStmt.close();
if (!hasSrxNsp) {
srxDevicesStmt = conn.prepareStatement("SELECT id FROM host WHERE data_center_id=? AND type = 'ExternalFirewall' AND removed IS NULL");
srxDevicesStmt.setLong(1, zoneId);
srxDevicesResult = srxDevicesStmt.executeQuery();
while (srxDevicesResult.next()) {
long srxHostId = srxDevicesResult.getLong(1);
// add SRX provider and provider instance to physical network
addSrxServiceProvider(conn, physicalNetworkId, zoneId);
addSrxFirewall(conn, srxHostId, physicalNetworkId);
@ -111,15 +124,16 @@ public class Upgrade302to303 implements DbUpgrade {
}
}
}
if (dcResults != null) {
if (zoneResults != null) {
try {
dcResults.close();
zoneResults.close();
} catch (SQLException e) {
}
}
if (dcSearchStmt != null) {
if (zoneSearchStmt != null) {
try {
dcSearchStmt.close();
zoneSearchStmt.close();
} catch (SQLException e) {
}
}
@ -131,17 +145,16 @@ public class Upgrade302to303 implements DbUpgrade {
}
private void addF5LoadBalancer(Connection conn, long hostId, long physicalNetworkId){
// add traffic types
PreparedStatement pstmtUpdate = null;
try{
s_logger.debug("Adding F5 Big IP load balancer with host id " + hostId);
s_logger.debug("Adding F5 Big IP load balancer with host id " + hostId + " in to physical network" + physicalNetworkId);
String insertF5 = "INSERT INTO `cloud`.`external_load_balancer_devices` (physical_network_id, host_id, provider_name, " +
"device_name, capacity, is_dedicated, device_state, allocation_state, is_inline, is_managed, uuid) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
pstmtUpdate = conn.prepareStatement(insertF5);
pstmtUpdate.setLong(1, physicalNetworkId);
pstmtUpdate.setLong(2, hostId);
pstmtUpdate.setString(3, "F5BigIp");
pstmtUpdate.setString(4, "F5BigIp");
pstmtUpdate.setString(4, "F5BigIpLoadBalancer");
pstmtUpdate.setLong(5, 0);
pstmtUpdate.setBoolean(6, false);
pstmtUpdate.setString(7, "Enabled");
@ -150,9 +163,8 @@ public class Upgrade302to303 implements DbUpgrade {
pstmtUpdate.setBoolean(10, false);
pstmtUpdate.setString(11, UUID.randomUUID().toString());
pstmtUpdate.executeUpdate();
pstmtUpdate.close();
}catch (SQLException e) {
throw new CloudRuntimeException("Exception while adding F5 load balancer due to", e);
throw new CloudRuntimeException("Exception while adding F5 load balancer device" , e);
} finally {
if (pstmtUpdate != null) {
try {
@ -164,26 +176,24 @@ public class Upgrade302to303 implements DbUpgrade {
}
private void addSrxFirewall(Connection conn, long hostId, long physicalNetworkId){
// add traffic types
PreparedStatement pstmtUpdate = null;
try{
s_logger.debug("Adding SRX firewall device with host id " + hostId);
s_logger.debug("Adding SRX firewall device with host id " + hostId + " in to physical network" + physicalNetworkId);
String insertSrx = "INSERT INTO `cloud`.`external_firewall_devices` (physical_network_id, host_id, provider_name, " +
"device_name, capacity, is_dedicated, device_state, allocation_state, uuid) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
"device_name, capacity, is_dedicated, device_state, allocation_state, uuid) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?)";
pstmtUpdate = conn.prepareStatement(insertSrx);
pstmtUpdate.setLong(1, physicalNetworkId);
pstmtUpdate.setLong(2, hostId);
pstmtUpdate.setString(3, "JuniperSRX");
pstmtUpdate.setString(4, "JuniperSRX");
pstmtUpdate.setString(4, "JuniperSRXFirewall");
pstmtUpdate.setLong(5, 0);
pstmtUpdate.setBoolean(6, false);
pstmtUpdate.setString(7, "Enabled");
pstmtUpdate.setString(8, "Shared");
pstmtUpdate.setString(9, UUID.randomUUID().toString());
pstmtUpdate.executeUpdate();
pstmtUpdate.close();
}catch (SQLException e) {
throw new CloudRuntimeException("Exception while adding F5 load balancer due to", e);
throw new CloudRuntimeException("Exception while adding SRX firewall device ", e);
} finally {
if (pstmtUpdate != null) {
try {
@ -198,7 +208,7 @@ public class Upgrade302to303 implements DbUpgrade {
PreparedStatement pstmtUpdate = null;
try{
// add physical network service provider - F5BigIp
s_logger.debug("Adding PhysicalNetworkServiceProvider F5BigIp");
s_logger.debug("Adding PhysicalNetworkServiceProvider F5BigIp" + " in to physical network" + physicalNetworkId);
String insertPNSP = "INSERT INTO `cloud`.`physical_network_service_providers` (`uuid`, `physical_network_id` , `provider_name`, `state` ," +
"`destination_physical_network_id`, `vpn_service_provided`, `dhcp_service_provided`, `dns_service_provided`, `gateway_service_provided`," +
"`firewall_service_provided`, `source_nat_service_provided`, `load_balance_service_provided`, `static_nat_service_provided`," +
@ -210,9 +220,8 @@ public class Upgrade302to303 implements DbUpgrade {
pstmtUpdate.setString(3, "F5BigIp");
pstmtUpdate.setString(4, "Enabled");
pstmtUpdate.executeUpdate();
pstmtUpdate.close();
}catch (SQLException e) {
throw new CloudRuntimeException("Exception while adding PhysicalNetworkServiceProvider F5BigIp ", e);
throw new CloudRuntimeException("Exception while adding PhysicalNetworkServiceProvider F5BigIp", e);
} finally {
if (pstmtUpdate != null) {
try {
@ -239,9 +248,8 @@ public class Upgrade302to303 implements DbUpgrade {
pstmtUpdate.setString(3, "JuniperSRX");
pstmtUpdate.setString(4, "Enabled");
pstmtUpdate.executeUpdate();
pstmtUpdate.close();
}catch (SQLException e) {
throw new CloudRuntimeException("Exception while adding PhysicalNetworkServiceProvider JuniperSRX ", e);
throw new CloudRuntimeException("Exception while adding PhysicalNetworkServiceProvider JuniperSRX" , e);
} finally {
if (pstmtUpdate != null) {
try {

View File

@ -508,6 +508,24 @@ public class UserVmManagerImpl implements UserVmManager, UserVmService, Manager
}
}
private int getMaxDataVolumesSupported(UserVmVO vm) {
    // Prefer the host the VM is currently on; a stopped VM has no current
    // host, so fall back to the host it last ran on.
    Long hostId = (vm.getHostId() != null) ? vm.getHostId() : vm.getLastHostId();
    Integer limit = null;
    HostVO host = _hostDao.findById(hostId);
    if (host != null) {
        _hostDao.loadDetails(host);
        // The limit is keyed by hypervisor type plus the host's reported
        // "product_version" detail.
        limit = _hypervisorCapabilitiesDao.getMaxDataVolumesLimit(host.getHypervisorType(), host.getDetail("product_version"));
    }
    // 6 data disks by default if nothing is specified in the
    // 'hypervisor_capabilities' table (or the host could not be resolved).
    return (limit != null) ? limit.intValue() : 6;
}
@Override
@ActionEvent(eventType = EventTypes.EVENT_VOLUME_ATTACH, eventDescription = "attaching volume", async = true)
public Volume attachVolumeToVM(AttachVolumeCmd command) {
@ -551,10 +569,11 @@ public class UserVmManagerImpl implements UserVmManager, UserVmService, Manager
}
}
// Check that the VM has less than 6 data volumes attached
// Check that the number of data volumes attached to VM is less than that supported by hypervisor
List<VolumeVO> existingDataVolumes = _volsDao.findByInstanceAndType(vmId, Volume.Type.DATADISK);
if (existingDataVolumes.size() >= 6) {
throw new InvalidParameterValueException("The specified VM already has the maximum number of data disks (6). Please specify another VM.", null);
int maxDataVolumesSupported = getMaxDataVolumesSupported(vm);
if (existingDataVolumes.size() >= maxDataVolumesSupported) {
throw new InvalidParameterValueException("The specified VM already has the maximum number of data disks (" + maxDataVolumesSupported + "). Please specify another VM.", null);
}
// Check that the VM and the volume are in the same zone

View File

@ -132,6 +132,7 @@ def choose_category(fn):
return v
raise Exception('Need to add a category for %s to %s:known_categories' %
(fn, __file__))
sys.exit(1)
for f in sys.argv:

View File

@ -1581,6 +1581,7 @@ CREATE TABLE `cloud`.`hypervisor_capabilities` (
`hypervisor_version` varchar(32),
`max_guests_limit` bigint unsigned DEFAULT 50,
`security_group_enabled` int(1) unsigned DEFAULT 1 COMMENT 'Is security group supported',
`max_data_volumes_limit` int unsigned DEFAULT 6 COMMENT 'Max. data volumes per VM supported by hypervisor',
PRIMARY KEY (`id`),
CONSTRAINT `uc_hypervisor_capabilities__uuid` UNIQUE (`uuid`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
@ -1590,8 +1591,8 @@ INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor
INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES ('XenServer', '5.6', 50, 1);
INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES ('XenServer', '5.6 FP1', 50, 1);
INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES ('XenServer', '5.6 SP2', 50, 1);
INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES ('XenServer', '6.0', 50, 1);
INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES ('XenServer', '6.0.2', 50, 1);
INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_data_volumes_limit) VALUES ('XenServer', '6.0', 50, 1, 13);
INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_data_volumes_limit) VALUES ('XenServer', '6.0.2', 50, 1, 13);
INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES ('VMware', 'default', 128, 0);
INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES ('VMware', '4.0', 128, 0);
INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES ('VMware', '4.1', 128, 0);

View File

@ -342,3 +342,6 @@ CREATE TABLE `cloud`.`s2s_vpn_connection` (
ALTER TABLE `cloud`.`data_center` ADD COLUMN `is_local_storage_enabled` tinyint NOT NULL DEFAULT 0 COMMENT 'Is local storage offering enabled for this data center; 1: enabled, 0: not';
UPDATE `cloud`.`data_center` SET `is_local_storage_enabled` = IF ((SELECT `value` FROM `cloud`.`configuration` WHERE `name`='use.local.storage')='true', 1, 0) WHERE `removed` IS NULL;
-- Track the per-hypervisor cap on data volumes attachable to a single VM.
ALTER TABLE `cloud`.`hypervisor_capabilities` ADD COLUMN `max_data_volumes_limit` int unsigned DEFAULT 6 COMMENT 'Max. data volumes per VM supported by hypervisor';
-- XenServer 6.0 / 6.0.2 support up to 13 data disks per VM.
-- Fix: MySQL's UPDATE statement takes no TABLE keyword; the previous
-- "UPDATE TABLE ..." form is a syntax error and would abort the upgrade.
UPDATE `cloud`.`hypervisor_capabilities` SET `max_data_volumes_limit`=13 WHERE `hypervisor_type`='XenServer' AND (`hypervisor_version`='6.0' OR `hypervisor_version`='6.0.2');

View File

@ -3,12 +3,12 @@ Welcome to the marvin sandbox
In here you should find a few common deployment models of CloudStack that you
can configure with properties files to suit your own deployment. One deployment
model for each of - advanced zone, basic zone and a demo are given.
model for each of - advanced zone, basic zone and a simulator demo are given.
$ ls -
basic/
advanced/
demo/
simulator/
Each property file is divided into logical sections and should be familiar to
those who have deployed CloudStack before. Once you have your properties file
@ -17,3 +17,14 @@ python script provided in the respective folder.
The demo files are from the tutorial for testing with python that can be found
on the wiki.cloudstack.org
A common deployment model of a simulator.cfg that can be used for debugging is
included. This will configure an advanced zone with simulators that can be used
for debugging purposes when you do not have hardware to debug with.
To do this:
$ cd cloudstack-oss/
$ ant run-simulator #This will start up the mgmt server with the simulator seeded
## In another shell
$ ant run-simulator

View File

@ -0,0 +1,159 @@
{
"zones": [
{
"name": "Sandbox-simulator",
"guestcidraddress": "10.1.1.0/24",
"providers": [
{
"broadcastdomainrange": "ZONE",
"name": "VirtualRouter"
}
],
"dns1": "10.147.28.6",
"vlan": "100-200",
"ipranges": [
{
"startip": "10.147.31.2",
"endip": "10.147.31.200",
"netmask": "255.255.255.0",
"vlan": "31",
"gateway": "10.147.31.1"
}
],
"networktype": "Advanced",
"pods": [
{
"endip": "10.147.29.200",
"name": "POD0",
"startip": "10.147.29.2",
"netmask": "255.255.255.0",
"clusters": [
{
"clustername": "C0",
"hypervisor": "simulator",
"hosts": [
{
"username": "root",
"url": "http://sim/c0/h0",
"password": "password"
},
{
"username": "root",
"url": "http://sim/c0/h1",
"password": "password"
}
],
"clustertype": "CloudManaged",
"primaryStorages": [
{
"url": "nfs://10.147.28.6:/export/home/sandbox/primary",
"name": "PS0"
}
]
}
],
"gateway": "10.147.29.1"
}
],
"internaldns1": "10.147.28.6",
"secondaryStorages": [
{
"url": "nfs://10.147.28.6:/export/home/sandbox/secondary"
}
]
}
],
"dbSvr": {
"dbSvr": "localhost",
"passwd": "cloud",
"db": "cloud",
"port": 3306,
"user": "cloud"
},
"logger": [
{
"name": "TestClient",
"file": "/var/log/testclient.log"
},
{
"name": "TestCase",
"file": "/var/log/testcase.log"
}
],
"globalConfig": [
{
"name": "storage.cleanup.interval",
"value": "300"
},
{
"name": "vm.op.wait.interval",
"value": "5"
},
{
"name": "default.page.size",
"value": "10000"
},
{
"name": "instance.name",
"value": "QA"
},
{
"name": "workers",
"value": "10"
},
{
"name": "use.user.concentrated.pod.allocation",
"value": "false"
},
{
"name": "account.cleanup.interval",
"value": "600"
},
{
"name": "guest.domain.suffix",
"value": "sandbox.simulator"
},
{
"name": "expunge.delay",
"value": "60"
},
{
"name": "network.gc.wait",
"value": "60"
},
{
"name": "network.gc.interval",
"value": "60"
},
{
"name": "vm.allocation.algorithm",
"value": "random"
},
{
"name": "expunge.interval",
"value": "60"
},
{
"name": "expunge.workers",
"value": "3"
},
{
"name": "check.pod.cidrs",
"value": "true"
},
{
"name": "secstorage.allowed.internal.sites",
"value": "10.147.28.0/24"
},
{
"name": "direct.agent.load.size",
"value": "1000"
}
],
"mgtSvr": [
{
"mgtSvrIp": "localhost",
"port": 8096
}
]
}

View File

@ -0,0 +1,66 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# Drives a Marvin sandbox run: opens the management server's integration API
# port via the database, deploys the given configuration, and then runs the
# setup-verification suite (testSetupSuccess.py). Requires Python 2.7.

# Print accepted options to stderr.
usage() {
printf "Usage: %s:\n
[-m mgmt-server ] \n
[-c config-file ] \n
[-d db node url ]\n" $(basename $0) >&2
}

# Exit with the supplied status code.
failed() {
exit $1
}

#defaults
# NOTE(review): FMT is never used below -- candidate for removal.
FMT=$(date +"%d_%I_%Y_%s")
MGMT_SVR="localhost"
CONFIG="demo/simulator/simulator-smoke.cfg"
DB_SVR="localhost"

# -d database host, -m management server host, -c marvin config file.
while getopts 'd:m:c:' OPTION
do
case $OPTION in
d) dflag=1
DB_SVR="$OPTARG"
;;
m) mflag=1
MGMT_SVR="$OPTARG"
;;
c) cflag=1
CONFIG="$OPTARG"
;;
?) usage
failed 2
;;
esac
done

# Enable the unauthenticated integration API on port 8096 so Marvin can
# talk to the management server without credentials.
# NOTE(review): this targets $MGMT_SVR, not $DB_SVR -- confirm which host
# actually runs MySQL in this deployment.
$(mysql -uroot -Dcloud -h$MGMT_SVR -s -N -r -e"update configuration set value='8096' where name='integration.api.port'")

# Marvin only supports Python 2.7 exactly; bail out on anything else.
version_tuple=$(python -c 'import sys; print(sys.version_info[:2])')
if [[ $version_tuple == "(2, 7)" ]]
then
# First pass deploys the zone described by $CONFIG; after a settling
# delay, the second pass runs only the setup-success checks (-l).
python -m marvin.deployAndRun -c $CONFIG -t /tmp/t.log -r /tmp/r.log -d /tmp
sleep 60
python -m marvin.deployAndRun -c $CONFIG -t /tmp/t.log -r /tmp/r.log -f testSetupSuccess.py -l
cat /tmp/r.log
echo "Done"
else
echo "Python version 2.7 not detected on system. Aborting"
fi

View File

@ -0,0 +1,81 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import marvin
import unittest
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from time import sleep as delay
class TestSetupSuccess(cloudstackTestCase):
    """
    Test to verify if the cloudstack is ready to launch tests upon
    1. Verify that system VMs are up and running in all zones
    2. Verify that built-in templates are Ready in all zones
    """
    @classmethod
    def setUpClass(cls):
        # Shared API client and zone list for all tests in this class.
        cls.apiClient = super(TestSetupSuccess, cls).getClsTestClient().getApiClient()
        zones = listZones.listZonesCmd()
        cls.zones_list = cls.apiClient.listZones(zones)
        # Number of one-minute polling attempts allowed per zone.
        cls.retry = 50

    def test_systemVmReady(self):
        """
        system VMs need to be ready and Running for each zone in cloudstack
        """
        for z in self.zones_list:
            retry = self.retry
            while retry != 0:
                self.debug("looking for system VMs in zone: %s, %s"%(z.id, z.name))
                sysvms = listSystemVms.listSystemVmsCmd()
                sysvms.zoneid = z.id
                sysvms.state = 'Running'
                sysvms_list = self.apiClient.listSystemVms(sysvms)
                # Expects exactly 2 Running system VMs per zone -- presumably
                # the SSVM and the console proxy. TODO confirm.
                if sysvms_list is not None and len(sysvms_list) == 2:
                    assert len(sysvms_list) == 2
                    self.debug("found %d system VMs running {%s}"%(len(sysvms_list), sysvms_list))
                    break
                retry = retry - 1
                delay(60) #wait a minute for retry
            # retry == 0 means we exhausted all attempts without success.
            self.assertNotEqual(retry, 0, "system VMs not Running in zone %s"%z.name)

    def test_templateBuiltInReady(self):
        """
        built-in templates CentOS to be ready
        """
        for z in self.zones_list:
            retry = self.retry
            while retry != 0:
                self.debug("Looking for at least one ready builtin template")
                templates = listTemplates.listTemplatesCmd()
                templates.templatefilter = 'featured'
                templates.listall = 'true'
                # NOTE(review): this query is not scoped to zone z even though
                # the loop iterates per zone -- verify against intent.
                templates_list = self.apiClient.listTemplates(templates)
                if templates_list is not None:
                    # Only BUILTIN templates that report ready count as usable.
                    builtins = [tmpl for tmpl in templates_list if tmpl.templatetype == 'BUILTIN' and tmpl.isready == True]
                    if len(builtins) > 0:
                        self.debug("Found %d builtins ready for use %s"%(len(builtins), builtins))
                        break
                retry = retry - 1
                delay(60) #wait a minute for retry
            self.assertNotEqual(retry, 0, "builtIn templates not ready in zone %s"%z.name)

    @classmethod
    def tearDownClass(cls):
        # Nothing to clean up; this suite only reads deployment state.
        pass

View File

@ -5,10 +5,11 @@
from distutils.core import setup
from sys import version
import sys
if version < "2.7":
print "Marvin needs at least python 2.7, found : \n%s"%version
raise
sys.exit(1)
setup(name="Marvin",
version="0.1.0",

View File

@ -4185,6 +4185,9 @@
cidrlist: { label: 'CIDR list' },
ipsecpsk: { label: 'IPsec Preshared-Key' },
id: { label: 'label.id' },
ikepolicy: { label: 'IKE policy'},
esppolicy:{ label: 'ESP policy'},
lifetime :{label: 'Lifetime (second)'},
domain: { label: 'label.domain' },
account: { label: 'label.account' }
}

View File

@ -545,7 +545,17 @@
},
basicPhysicalNetwork: { //"Netscaler" now
preFilter: cloudStack.preFilter.addLoadBalancerDevice,
preFilter: cloudStack.preFilter.addLoadBalancerDevice,
//Handling the hiding of "dedicated" option
preFilter: function(args) {
if (args.data['network-model'] == 'Basic' && (selectedNetworkOfferingHavingELB || selectedNetworkOfferingHavingEIP)) {
args.$form.find('[rel=dedicated]').hide();
} else {
args.$form.find('[rel=dedicated]').show();
};
cloudStack.preFilter.addLoadBalancerDevice
},
fields: {
ip: {
label: 'label.ip.address'