mirror of https://github.com/apache/cloudstack.git
CLOUDSTACK-10303 : Refactor test data to nuage_test_data.py runnable against simulator (#2483)
* Refactored nuage tests Added simulator support for ConfigDrive Allow all nuage tests to run against simulator Refactored nuage tests to remove code duplication * Move test data from test_data.py to nuage_test_data.py Nuage test data is now contained in nuage_test_data.py instead of test_data.py Removed all nuage test data from nuage_test_data.py * CLOUD-1252 fixed cleanup of vpc tier network * Import libVSD into the codebase * CLOUDSTACK-1253: Volumes are not expunged in simulator * Fixed some merge issues in test_nuage_vsp_mngd_subnets test * Implement GetVolumeStatsCommand in Simulator * Add vspk as marvin nuagevsp dependency, after removing libVSD dependency * correct libVSD files for license purposes pep8 pyflakes compliant
This commit is contained in:
parent
7112affe19
commit
19d6578732
|
|
@ -128,14 +128,14 @@ public class DefaultEndPointSelector implements EndPointSelector {
|
|||
String sql = sbuilder.toString();
|
||||
HostVO host = null;
|
||||
TransactionLegacy txn = TransactionLegacy.currentTxn();
|
||||
try(PreparedStatement pstmt = txn.prepareStatement(sql);) {
|
||||
try (PreparedStatement pstmt = txn.prepareStatement(sql)) {
|
||||
pstmt.setLong(1, poolId);
|
||||
try(ResultSet rs = pstmt.executeQuery();) {
|
||||
while (rs.next()) {
|
||||
long id = rs.getLong(1);
|
||||
host = hostDao.findById(id);
|
||||
}
|
||||
}catch (SQLException e) {
|
||||
} catch (SQLException e) {
|
||||
s_logger.warn("can't find endpoint", e);
|
||||
}
|
||||
} catch (SQLException e) {
|
||||
|
|
|
|||
|
|
@ -33,6 +33,9 @@ import com.cloud.agent.api.CreateVolumeFromSnapshotCommand;
|
|||
import com.cloud.agent.api.DeleteStoragePoolCommand;
|
||||
import com.cloud.agent.api.GetStorageStatsAnswer;
|
||||
import com.cloud.agent.api.GetStorageStatsCommand;
|
||||
import com.cloud.agent.api.GetVolumeStatsAnswer;
|
||||
import com.cloud.agent.api.GetVolumeStatsCommand;
|
||||
import com.cloud.agent.api.HandleConfigDriveIsoCommand;
|
||||
import com.cloud.agent.api.ManageSnapshotCommand;
|
||||
import com.cloud.agent.api.ModifyStoragePoolCommand;
|
||||
import com.cloud.agent.api.SecStorageSetupCommand;
|
||||
|
|
@ -77,6 +80,8 @@ public interface MockStorageManager extends Manager {
|
|||
|
||||
public Answer DownloadProcess(DownloadProgressCommand cmd);
|
||||
|
||||
GetVolumeStatsAnswer getVolumeStats(GetVolumeStatsCommand cmd);
|
||||
|
||||
public GetStorageStatsAnswer GetStorageStats(GetStorageStatsCommand cmd);
|
||||
|
||||
public Answer ManageSnapshot(ManageSnapshotCommand cmd);
|
||||
|
|
@ -107,4 +112,5 @@ public interface MockStorageManager extends Manager {
|
|||
|
||||
public UploadStatusAnswer getUploadStatus(UploadStatusCommand cmd);
|
||||
|
||||
Answer handleConfigDriveIso(HandleConfigDriveIsoCommand cmd);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -26,6 +26,8 @@ import java.util.HashMap;
|
|||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.UUID;
|
||||
import java.util.function.Function;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import javax.inject.Inject;
|
||||
import javax.naming.ConfigurationException;
|
||||
|
|
@ -38,6 +40,7 @@ import org.apache.cloudstack.storage.command.DownloadProgressCommand;
|
|||
import org.apache.cloudstack.storage.command.UploadStatusAnswer;
|
||||
import org.apache.cloudstack.storage.command.UploadStatusAnswer.UploadStatus;
|
||||
import org.apache.cloudstack.storage.command.UploadStatusCommand;
|
||||
import org.apache.cloudstack.storage.to.VolumeObjectTO;
|
||||
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.AttachIsoCommand;
|
||||
|
|
@ -52,6 +55,9 @@ import com.cloud.agent.api.CreateVolumeFromSnapshotCommand;
|
|||
import com.cloud.agent.api.DeleteStoragePoolCommand;
|
||||
import com.cloud.agent.api.GetStorageStatsAnswer;
|
||||
import com.cloud.agent.api.GetStorageStatsCommand;
|
||||
import com.cloud.agent.api.GetVolumeStatsAnswer;
|
||||
import com.cloud.agent.api.GetVolumeStatsCommand;
|
||||
import com.cloud.agent.api.HandleConfigDriveIsoCommand;
|
||||
import com.cloud.agent.api.ManageSnapshotAnswer;
|
||||
import com.cloud.agent.api.ManageSnapshotCommand;
|
||||
import com.cloud.agent.api.ModifyStoragePoolAnswer;
|
||||
|
|
@ -60,6 +66,7 @@ import com.cloud.agent.api.SecStorageSetupAnswer;
|
|||
import com.cloud.agent.api.SecStorageSetupCommand;
|
||||
import com.cloud.agent.api.SecStorageVMSetupCommand;
|
||||
import com.cloud.agent.api.StoragePoolInfo;
|
||||
import com.cloud.agent.api.VolumeStatsEntry;
|
||||
import com.cloud.agent.api.storage.CopyVolumeAnswer;
|
||||
import com.cloud.agent.api.storage.CopyVolumeCommand;
|
||||
import com.cloud.agent.api.storage.CreateAnswer;
|
||||
|
|
@ -578,6 +585,37 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa
|
|||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public GetVolumeStatsAnswer getVolumeStats(final GetVolumeStatsCommand cmd) {
|
||||
HashMap<String, VolumeStatsEntry> volumeStats =
|
||||
cmd.getVolumeUuids()
|
||||
.stream()
|
||||
.collect(Collectors.toMap(Function.identity(),
|
||||
this::getVolumeStat,
|
||||
(v1, v2) -> v1, HashMap::new));
|
||||
|
||||
return new GetVolumeStatsAnswer(cmd, "", volumeStats);
|
||||
}
|
||||
|
||||
private VolumeStatsEntry getVolumeStat(final String volumeUuid) {
|
||||
TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB);
|
||||
|
||||
try {
|
||||
txn.start();
|
||||
MockVolumeVO volume = _mockVolumeDao.findByUuid(volumeUuid);
|
||||
txn.commit();
|
||||
return new VolumeStatsEntry(volumeUuid, volume.getSize(), volume.getSize());
|
||||
} catch (Exception ex) {
|
||||
txn.rollback();
|
||||
throw new CloudRuntimeException("Error when finding volume " + volumeUuid, ex);
|
||||
} finally {
|
||||
txn.close();
|
||||
txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
|
||||
txn.close();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public GetStorageStatsAnswer GetStorageStats(GetStorageStatsCommand cmd) {
|
||||
String uuid = cmd.getStorageId();
|
||||
|
|
@ -786,9 +824,13 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa
|
|||
txn.start();
|
||||
MockVolumeVO template = _mockVolumeDao.findByStoragePathAndType(cmd.getData().getPath());
|
||||
if (template == null) {
|
||||
return new Answer(cmd, false, "can't find object to delete:" + cmd.getData().getPath());
|
||||
if(!((VolumeObjectTO)cmd.getData()).getName().startsWith("ROOT-")) {
|
||||
return new Answer(cmd, false, "can't find object to delete:" + cmd.getData()
|
||||
.getPath());
|
||||
}
|
||||
} else {
|
||||
_mockVolumeDao.remove(template.getId());
|
||||
}
|
||||
_mockVolumeDao.remove(template.getId());
|
||||
txn.commit();
|
||||
} catch (Exception ex) {
|
||||
txn.rollback();
|
||||
|
|
@ -1228,4 +1270,49 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa
|
|||
public UploadStatusAnswer getUploadStatus(UploadStatusCommand cmd) {
|
||||
return new UploadStatusAnswer(cmd, UploadStatus.COMPLETED);
|
||||
}
|
||||
|
||||
@Override public Answer handleConfigDriveIso(HandleConfigDriveIsoCommand cmd) {
|
||||
TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB);
|
||||
MockSecStorageVO sec;
|
||||
try {
|
||||
txn.start();
|
||||
sec = _mockSecStorageDao.findByUrl(cmd.getDestStore().getUrl());
|
||||
if (sec == null) {
|
||||
return new Answer(cmd, false, "can't find secondary storage");
|
||||
}
|
||||
|
||||
txn.commit();
|
||||
} catch (Exception ex) {
|
||||
txn.rollback();
|
||||
throw new CloudRuntimeException("Error when creating config drive.");
|
||||
} finally {
|
||||
txn.close();
|
||||
txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
|
||||
txn.close();
|
||||
}
|
||||
|
||||
MockVolumeVO template = new MockVolumeVO();
|
||||
String uuid = UUID.randomUUID().toString();
|
||||
template.setName(uuid);
|
||||
template.setPath(sec.getMountPoint() + cmd.getIsoFile());
|
||||
template.setPoolId(sec.getId());
|
||||
template.setSize((long)(Math.random() * 200L) + 200L);
|
||||
template.setStatus(Status.DOWNLOADED);
|
||||
template.setType(MockVolumeType.ISO);
|
||||
txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB);
|
||||
try {
|
||||
txn.start();
|
||||
template = _mockVolumeDao.persist(template);
|
||||
txn.commit();
|
||||
} catch (Exception ex) {
|
||||
txn.rollback();
|
||||
throw new CloudRuntimeException("Encountered " + ex.getMessage() + " when persisting config drive " + template.getName(), ex);
|
||||
} finally {
|
||||
txn.close();
|
||||
txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
|
||||
txn.close();
|
||||
}
|
||||
|
||||
return new Answer(cmd);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -64,6 +64,8 @@ import com.cloud.agent.api.GetHostStatsCommand;
|
|||
import com.cloud.agent.api.GetStorageStatsCommand;
|
||||
import com.cloud.agent.api.GetVmStatsCommand;
|
||||
import com.cloud.agent.api.GetVncPortCommand;
|
||||
import com.cloud.agent.api.GetVolumeStatsCommand;
|
||||
import com.cloud.agent.api.HandleConfigDriveIsoCommand;
|
||||
import com.cloud.agent.api.MaintainCommand;
|
||||
import com.cloud.agent.api.ManageSnapshotCommand;
|
||||
import com.cloud.agent.api.MigrateCommand;
|
||||
|
|
@ -206,6 +208,7 @@ public class SimulatorManagerImpl extends ManagerBase implements SimulatorManage
|
|||
@DB
|
||||
@Override
|
||||
public Answer simulate(final Command cmd, final String hostGuid) {
|
||||
s_logger.debug("Simulate command " + cmd);
|
||||
Answer answer = null;
|
||||
Exception exception = null;
|
||||
TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB);
|
||||
|
|
@ -363,6 +366,8 @@ public class SimulatorManagerImpl extends ManagerBase implements SimulatorManage
|
|||
answer = _mockStorageMgr.Download((DownloadCommand)cmd);
|
||||
} else if (cmd instanceof GetStorageStatsCommand) {
|
||||
answer = _mockStorageMgr.GetStorageStats((GetStorageStatsCommand)cmd);
|
||||
} else if (cmd instanceof GetVolumeStatsCommand) {
|
||||
answer = _mockStorageMgr.getVolumeStats((GetVolumeStatsCommand)cmd);
|
||||
} else if (cmd instanceof ManageSnapshotCommand) {
|
||||
answer = _mockStorageMgr.ManageSnapshot((ManageSnapshotCommand)cmd);
|
||||
} else if (cmd instanceof BackupSnapshotCommand) {
|
||||
|
|
@ -431,8 +436,14 @@ public class SimulatorManagerImpl extends ManagerBase implements SimulatorManage
|
|||
answer = storageHandler.handleStorageCommands((StorageSubSystemCommand)cmd);
|
||||
} else if (cmd instanceof FenceCommand) {
|
||||
answer = _mockVmMgr.fence((FenceCommand)cmd);
|
||||
} else if (cmd instanceof GetRouterAlertsCommand || cmd instanceof VpnUsersCfgCommand || cmd instanceof RemoteAccessVpnCfgCommand || cmd instanceof SetMonitorServiceCommand || cmd instanceof AggregationControlCommand ||
|
||||
cmd instanceof SecStorageFirewallCfgCommand) {
|
||||
} else if (cmd instanceof HandleConfigDriveIsoCommand) {
|
||||
answer = _mockStorageMgr.handleConfigDriveIso((HandleConfigDriveIsoCommand)cmd);
|
||||
} else if (cmd instanceof GetRouterAlertsCommand
|
||||
|| cmd instanceof VpnUsersCfgCommand
|
||||
|| cmd instanceof RemoteAccessVpnCfgCommand
|
||||
|| cmd instanceof SetMonitorServiceCommand
|
||||
|| cmd instanceof AggregationControlCommand
|
||||
|| cmd instanceof SecStorageFirewallCfgCommand) {
|
||||
answer = new Answer(cmd);
|
||||
} else {
|
||||
s_logger.error("Simulator does not implement command of type " + cmd.toString());
|
||||
|
|
@ -447,6 +458,8 @@ public class SimulatorManagerImpl extends ManagerBase implements SimulatorManage
|
|||
}
|
||||
}
|
||||
|
||||
s_logger.debug("Finished simulate command " + cmd);
|
||||
|
||||
return answer;
|
||||
} catch (final Exception e) {
|
||||
s_logger.error("Failed execute cmd: ", e);
|
||||
|
|
|
|||
2
plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorStorageProcessor.java
Executable file → Normal file
2
plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorStorageProcessor.java
Executable file → Normal file
|
|
@ -85,7 +85,7 @@ public class SimulatorStorageProcessor implements StorageProcessor {
|
|||
public Answer copyTemplateToPrimaryStorage(CopyCommand cmd) {
|
||||
TemplateObjectTO template = new TemplateObjectTO();
|
||||
template.setPath(UUID.randomUUID().toString());
|
||||
template.setSize(new Long(100));
|
||||
template.setSize(100L);
|
||||
template.setFormat(Storage.ImageFormat.RAW);
|
||||
return new CopyCmdAnswer(template);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -118,9 +118,9 @@ public class ConfigDriveNetworkElement extends AdapterBase implements NetworkEle
|
|||
@Inject
|
||||
VolumeOrchestrationService _volumeMgr;
|
||||
|
||||
public final static String CONFIGDRIVEFILENAME = "configdrive.iso";
|
||||
public final static String CONFIGDRIVEDIR= "ConfigDrive";
|
||||
public final static Integer CONFIGDRIVEDISKSEQ= new Integer(4);
|
||||
private final static String CONFIGDRIVEFILENAME = "configdrive.iso";
|
||||
private final static String CONFIGDRIVEDIR = "ConfigDrive";
|
||||
private final static Integer CONFIGDRIVEDISKSEQ = 4;
|
||||
|
||||
private boolean canHandle(TrafficType trafficType) {
|
||||
return trafficType.equals(TrafficType.Guest);
|
||||
|
|
@ -320,9 +320,10 @@ public class ConfigDriveNetworkElement extends AdapterBase implements NetworkEle
|
|||
s_logger.debug(String.format("%s config drive ISO for vm %s in host %s",
|
||||
(update?"update":"create"), profile.getInstanceName(), _hostDao.findById(hostId).getName()));
|
||||
EndPoint endpoint = _ep.select(secondaryStore);
|
||||
if (endpoint == null )
|
||||
throw new ResourceUnavailableException(String.format("%s failed, secondary store not available",
|
||||
(update?"Update":"Create")),secondaryStore.getClass(),secondaryStore.getId());
|
||||
if (endpoint == null) {
|
||||
throw new ResourceUnavailableException(String.format("%s failed, secondary store not available", (update ? "Update" : "Create")), secondaryStore.getClass(),
|
||||
secondaryStore.getId());
|
||||
}
|
||||
String isoPath = CONFIGDRIVEDIR + "/" + profile.getInstanceName() + "/" + CONFIGDRIVEFILENAME;
|
||||
HandleConfigDriveIsoCommand configDriveIsoCommand = new HandleConfigDriveIsoCommand(profile.getVmData(),
|
||||
profile.getConfigDriveLabel(), secondaryStore.getTO(), isoPath, true, update);
|
||||
|
|
|
|||
|
|
@ -0,0 +1,22 @@
|
|||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from .client import ApiClient
|
||||
from .helpers import VSDHelpers
|
||||
|
||||
__version__ = "1.0"
|
||||
__all__ = ['ApiClient', 'VSDHelpers']
|
||||
|
|
@ -0,0 +1,135 @@
|
|||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import bambou
|
||||
import importlib
|
||||
|
||||
|
||||
class ApiClient(object):
|
||||
"""
|
||||
This class provides utilities to instantiate an API client using vspk.
|
||||
Args:
|
||||
address (str): ip address or hostname where the VSD API is exposed.
|
||||
user (str): username to authenticate on the API.
|
||||
password (str): password to authenticate on the API.
|
||||
enterprise (str): VSD organization to use to authenticate on the API.
|
||||
version (str): version of the API to use.
|
||||
"""
|
||||
|
||||
def __init__(self, address, port='8443', user='csproot',
|
||||
password='csproot', enterprise='csp', version=None):
|
||||
if not version:
|
||||
version = '5.0'
|
||||
self.url = 'https://{}:{}'.format(address, port)
|
||||
self.version = version
|
||||
self.user = user
|
||||
self.password = password
|
||||
self.enterprise = enterprise
|
||||
self.last_pushes = []
|
||||
self.session = None
|
||||
|
||||
@staticmethod
|
||||
def import_vspk(version):
|
||||
"""
|
||||
Return the vspk module corresponding to a given version of the API.
|
||||
Args:
|
||||
version (str): version of the API
|
||||
"""
|
||||
version = 'v{}'.format(str(version).replace('.', '_'))
|
||||
try:
|
||||
vsdk = importlib.import_module('vspk.%s' % version)
|
||||
except:
|
||||
vsdk = importlib.import_module('vspk.vsdk.%s' % version)
|
||||
return vsdk
|
||||
|
||||
def import_vsdenvs(self):
|
||||
"""
|
||||
Return the root class a `vsdenvs`.
|
||||
"""
|
||||
if not self.session:
|
||||
raise Exception('You must have an active session to use vsdenvs')
|
||||
self.vsdenvs = __import__('vsdenvs', globals(), locals(), [], -1)
|
||||
self.vsdenvs.NUCsprootEnvironment.instance = self.session.user
|
||||
|
||||
def new_session(self):
|
||||
"""
|
||||
Start a new API session via vspk an return the corresponding
|
||||
`vspk.NUVSDSession` object. Note that this object is also exposed as
|
||||
`self.session`
|
||||
"""
|
||||
vspk = self.import_vspk(self.version)
|
||||
self.session = vspk.NUVSDSession(
|
||||
username=self.user,
|
||||
password=self.password,
|
||||
enterprise=self.enterprise,
|
||||
api_url=self.url)
|
||||
self.session.start()
|
||||
return self.session
|
||||
|
||||
def start_push_center(self, callback=None):
|
||||
"""
|
||||
Add a vspk push center to the current session.
|
||||
"""
|
||||
if not callback:
|
||||
callback = self.default_callback
|
||||
self.session.push_center.add_delegate(callback)
|
||||
self.session.push_center.start()
|
||||
|
||||
def stop_push_center(self, callback=None):
|
||||
"""
|
||||
Stop the vpsk push center for the current session.
|
||||
"""
|
||||
self.session.push_center.stop()
|
||||
|
||||
def default_callback(self, data):
|
||||
"""
|
||||
Default callback for the push center. It just stores the new event in
|
||||
a LILO queue exposed as `self.last_pushe`
|
||||
"""
|
||||
self.last_pushes.append(data)
|
||||
# keep only the last 10 events
|
||||
if len(self.last_pushes) == 100:
|
||||
del self.last_pushes[-1]
|
||||
|
||||
def __call__(self):
|
||||
if not self.session:
|
||||
return self.new_session()
|
||||
return self.session
|
||||
|
||||
def add_license(self):
|
||||
"""
|
||||
Add a license to the VSD
|
||||
"""
|
||||
vspk = self.import_vspk(self.version)
|
||||
self.session.license = vspk.NULicense(license=self.license)
|
||||
try:
|
||||
self.session.user.create_child(self.session.license)
|
||||
except bambou.exceptions.BambouHTTPError:
|
||||
pass
|
||||
else:
|
||||
self.session.user.add_child(self.session.license)
|
||||
|
||||
def delete_license(self):
|
||||
"""
|
||||
Delete license on the VSD
|
||||
"""
|
||||
self.session.user.licenses.fetch()
|
||||
for license in self.session.user.licenses:
|
||||
try:
|
||||
license.delete()
|
||||
except bambou.exceptions.BambouHTTPError:
|
||||
pass
|
||||
|
|
@ -0,0 +1,602 @@
|
|||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import logging
|
||||
import functools
|
||||
import bambou
|
||||
|
||||
LOG = logging.getLogger()
|
||||
|
||||
|
||||
class recreate_session_on_timeout(object):
|
||||
def __init__(self, method):
|
||||
self.method = method
|
||||
|
||||
def __get__(self, obj=None, objtype=None):
|
||||
@functools.wraps(self.method)
|
||||
def _wrapper(*args, **kwargs):
|
||||
try:
|
||||
return self.method(obj, *args, **kwargs)
|
||||
except bambou.exceptions.BambouHTTPError as e:
|
||||
if e.connection.response.status_code == 401:
|
||||
obj.session = obj.api_client.new_session()
|
||||
return self.method(obj, *args, **kwargs)
|
||||
else:
|
||||
raise e
|
||||
|
||||
return _wrapper
|
||||
|
||||
|
||||
class VSDHelpers(object):
|
||||
|
||||
def __init__(self, api_client):
|
||||
"""
|
||||
Create a wrapper
|
||||
provide a cspsession and a vpsk object, all from the VSD object
|
||||
"""
|
||||
self.api_client = api_client
|
||||
self.session = api_client.session
|
||||
self.vspk = api_client.import_vspk(api_client.version)
|
||||
|
||||
def update_vsd_session(self, api_client=None):
|
||||
"""
|
||||
This method is used when Helper is
|
||||
initialized before we create a new_session.
|
||||
"""
|
||||
if api_client:
|
||||
self.session = api_client.session
|
||||
self.vspk = api_client.import_vspk(api_client.version)
|
||||
else:
|
||||
self.session = self.api_client.session
|
||||
|
||||
@recreate_session_on_timeout
|
||||
def add_user_to_group(self, enterprise, user=None, group=None,
|
||||
usr_filter=None, grp_filter=None):
|
||||
"""
|
||||
Add user to a group on VSD.
|
||||
For example: Add csproot to cms group
|
||||
Here you can couple of things:
|
||||
1. enterprise can be id or NURest Object
|
||||
2. And User group both need to be NURest object
|
||||
or both can be filters.
|
||||
"""
|
||||
if not isinstance(enterprise, self.vspk.NUEnterprise):
|
||||
enterprise = self.vspk.NUEnterprise(id=enterprise)
|
||||
if isinstance(group, self.vspk.NUGroup):
|
||||
if isinstance(user, self.vspk.NUUser):
|
||||
all_users = group.users.get()
|
||||
all_users.append(user)
|
||||
group.assign(all_users, self.vspk.NUUser)
|
||||
elif usr_filter and grp_filter:
|
||||
group = enterprise.groups.get_first(filter=grp_filter)
|
||||
all_users = group.users.get()
|
||||
user = enterprise.users.get_first(filter=usr_filter)
|
||||
if not group:
|
||||
LOG.error('could not fetch the group matching the filter "{}"'
|
||||
.format(grp_filter))
|
||||
return
|
||||
if not user:
|
||||
LOG.error('could not fetch the user matching the filter "{}"'
|
||||
.format(usr_filter))
|
||||
return
|
||||
all_users.append(user)
|
||||
group.assign(all_users, self.vspk.NUUser)
|
||||
|
||||
def set_name_filter(self, name):
|
||||
""" set name filter for vsd query
|
||||
@param: name: string name
|
||||
@return: filter string
|
||||
"""
|
||||
return 'name is "{}"'.format(name)
|
||||
|
||||
def set_externalID_filter(self, id):
|
||||
""" set externalID filter for vsd query
|
||||
@param: id: string externalID
|
||||
@return: filter string
|
||||
"""
|
||||
return 'externalID is "{}"'.format(id)
|
||||
|
||||
@recreate_session_on_timeout
|
||||
def get_enterprise(self, filter):
|
||||
""" get_enterprise
|
||||
@params: enterprise filter following vspk filter structure
|
||||
@return: enterprise object
|
||||
@Example:
|
||||
self.vsd.get_enterprise(
|
||||
filter='externalID == "{}"'.format(ext_id))
|
||||
"""
|
||||
if not filter:
|
||||
LOG.error('a filter is required')
|
||||
return None
|
||||
enterprise = self.session.user.enterprises.get_first(filter=filter)
|
||||
if not enterprise:
|
||||
LOG.error('could not fetch the enterprise matching the filter "{}"'
|
||||
.format(filter))
|
||||
return enterprise
|
||||
|
||||
@recreate_session_on_timeout
|
||||
def get_l2domain(self, enterprise=None, filter=None):
|
||||
""" get_l2domain
|
||||
@params: enterprise object or enterprise id
|
||||
filter following vspk filter structure
|
||||
@return l2 domain object
|
||||
@Example:
|
||||
self.vsd.get_l2domain(enterprise=enterprise,
|
||||
filter='name == "{}"'.format(name))
|
||||
self.vsd.get_l2domain(enterprise=enterprise_id,
|
||||
filter='name == "{}"'.format(name))
|
||||
self.vsd.get_l2domain(filter='externalID == "{}"'.format(ext_id))
|
||||
"""
|
||||
l2_domain = None
|
||||
if enterprise:
|
||||
if not isinstance(enterprise, self.vspk.NUEnterprise):
|
||||
enterprise = self.vspk.NUEnterprise(id=enterprise)
|
||||
l2_domain = enterprise.l2_domains.get_first(filter=filter)
|
||||
elif filter:
|
||||
l2_domain = self.session.user.l2_domains.get_first(filter=filter)
|
||||
if not l2_domain:
|
||||
LOG.error('could not fetch the l2 domain matching the filter "{}"'
|
||||
.format(filter))
|
||||
return l2_domain
|
||||
|
||||
@recreate_session_on_timeout
|
||||
def get_domain(self, enterprise=None, filter=None):
|
||||
""" get_domain
|
||||
@params: enterprise object or enterprise id
|
||||
filter following vspk filter structure
|
||||
@return: domain object
|
||||
@Example:
|
||||
self.vsd.get_domain(enterprise=enterprise,
|
||||
filter='name == "{}"'.format(name))
|
||||
self.vsd.get_domain(enterprise=enterprise_id,
|
||||
filter='name == "{}"'.format(name))
|
||||
self.vsd.get_domain(filter='externalID == "{}"'.format(ext_id))
|
||||
"""
|
||||
domain = None
|
||||
if enterprise:
|
||||
if not isinstance(enterprise, self.vspk.NUEnterprise):
|
||||
enterprise = self.vspk.NUEnterprise(id=enterprise)
|
||||
domain = enterprise.domains.get_first(filter=filter)
|
||||
elif filter:
|
||||
domain = self.session.user.domains.get_first(filter=filter)
|
||||
if not domain:
|
||||
LOG.error('could not fetch the domain matching the filter "{}"'
|
||||
.format(filter))
|
||||
return domain
|
||||
|
||||
@recreate_session_on_timeout
|
||||
def get_domain_template(self, enterprise=None, filter=None):
|
||||
""" get_domain
|
||||
@params: enterprise object or enterprise id
|
||||
filter following vspk filter structure
|
||||
@return: domain object
|
||||
@Example:
|
||||
self.vsd.get_domain(enterprise=enterprise,
|
||||
filter='name == "{}"'.format(name))
|
||||
self.vsd.get_domain(enterprise=enterprise_id,
|
||||
filter='name == "{}"'.format(name))
|
||||
self.vsd.get_domain(filter='externalID == "{}"'.format(ext_id))
|
||||
"""
|
||||
domain = None
|
||||
if enterprise:
|
||||
if not isinstance(enterprise, self.vspk.NUEnterprise):
|
||||
enterprise = self.vspk.NUEnterprise(id=enterprise)
|
||||
domain = enterprise.domain_templates.get_first(filter=filter)
|
||||
elif filter:
|
||||
domain = \
|
||||
self.session.user.domain_templates.get_first(filter=filter)
|
||||
if not domain:
|
||||
LOG.error('could not fetch the domain template '
|
||||
'matching the filter "{}"'
|
||||
.format(filter))
|
||||
return domain
|
||||
|
||||
@recreate_session_on_timeout
|
||||
def get_zone(self, domain=None, filter=None):
|
||||
""" get_zone
|
||||
@params: domain object or domain id
|
||||
filter following vspk filter structure
|
||||
@return: zone object
|
||||
@Example:
|
||||
self.vsd.get_zone(domain=domain,
|
||||
filter='name == "{}"'.format(name))
|
||||
self.vsd.get_zone(domain=domain_id,
|
||||
filter='name == "{}"'.format(name))
|
||||
self.vsd.get_zone(filter='externalID == "{}"'.format(ext_id))
|
||||
"""
|
||||
zone = None
|
||||
if domain:
|
||||
if not isinstance(domain, self.vspk.NUDomain):
|
||||
domain = self.vspk.NUDomain(id=domain)
|
||||
zone = domain.zones.get_first(filter=filter)
|
||||
elif filter:
|
||||
zone = self.session.user.zones.get_first(filter=filter)
|
||||
if not zone:
|
||||
LOG.error('could not fetch the zone matching the filter "{}"'
|
||||
.format(filter))
|
||||
return zone
|
||||
|
||||
@recreate_session_on_timeout
|
||||
def get_subnet(self, zone=None, filter=None):
|
||||
""" get_subnet
|
||||
@params: zone object or zone id
|
||||
filter following vspk filter structure
|
||||
@return: subnet object
|
||||
@Example:
|
||||
self.vsd.get_subnet(zone=zone,
|
||||
filter='name == "{}"'.format(name))
|
||||
self.vsd.get_subnet(zone=zone_id,
|
||||
filter='name == "{}"'.format(name))
|
||||
self.vsd.get_subnet(filter='externalID == "{}"'.format(ext_id))
|
||||
"""
|
||||
subnet = None
|
||||
if zone:
|
||||
if not isinstance(zone, self.vspk.NUZone):
|
||||
zone = self.vspk.NUZone(id=zone)
|
||||
subnet = zone.subnets.get_first(filter=filter)
|
||||
elif filter:
|
||||
subnet = self.session.user.subnets.get_first(filter=filter)
|
||||
if not subnet:
|
||||
LOG.error('could not fetch the subnet matching the filter "{}"'
|
||||
.format(filter))
|
||||
return subnet
|
||||
|
||||
@recreate_session_on_timeout
|
||||
def get_subnet_from_domain(self, domain=None, filter=None):
|
||||
""" get_subnet
|
||||
@params: domain object or domain id
|
||||
filter following vspk filter structure
|
||||
@return: subnet object
|
||||
@Example:
|
||||
self.vsd.get_subnet(domain=domain,
|
||||
filter='name == "{}"'.format(name))
|
||||
self.vsd.get_subnet(domain=domain_id,
|
||||
filter='name == "{}"'.format(name))
|
||||
self.vsd.get_subnet(filter='externalID == "{}"'.format(ext_id))
|
||||
"""
|
||||
subnet = None
|
||||
if domain:
|
||||
if not isinstance(domain, self.vspk.NUDomain):
|
||||
domain = self.vspk.NUDomain(id=domain)
|
||||
subnet = domain.subnets.get_first(filter=filter)
|
||||
elif filter:
|
||||
subnet = self.session.user.subnets.get_first(filter=filter)
|
||||
if not subnet:
|
||||
LOG.error('could not fetch the subnet matching the filter "{}"'
|
||||
.format(filter))
|
||||
return subnet
|
||||
|
||||
@recreate_session_on_timeout
|
||||
def get_vm(self, subnet=None, filter=None):
|
||||
""" get_vm
|
||||
@params: subnet object or subnet id
|
||||
filter following vspk filter structure
|
||||
@return: vm object
|
||||
@Example:
|
||||
self.vsd.get_vm(subnet=subnet,
|
||||
filter='name == "{}"'.format(name))
|
||||
self.vsd.get_vm(subnet=subnet_id,
|
||||
filter='name == "{}"'.format(name))
|
||||
self.vsd.get_vm(filter='externalID == "{}"'.format(ext_id))
|
||||
"""
|
||||
vm = None
|
||||
if subnet:
|
||||
if not isinstance(subnet, self.vspk.NUSubnet):
|
||||
subnet = self.vspk.NUSubnet(id=subnet)
|
||||
vm = subnet.vms.get_first(filter=filter)
|
||||
elif filter:
|
||||
vm = self.session.user.vms.get_first(filter=filter)
|
||||
if not vm:
|
||||
LOG.error('could not fetch the vm matching the filter "{}"'
|
||||
.format(filter))
|
||||
return vm
|
||||
|
||||
@recreate_session_on_timeout
def get_subnet_dhcpoptions(self, subnet=None, filter=None):
    """Fetch the DHCP options defined on a subnet.

    :param subnet: subnet object; when it is not a NUSubnet, the subnet
                   is looked up via the filter instead
    :param filter: vspk filter expression used to locate the subnet,
                   e.g. 'externalID == "{}"'.format(subnet_externalID)
    :return: the subnet's dhcp options, or None when the subnet lookup
             fails
    @Example:
    self.vsd.get_subnet_dhcpoptions(subnet=subnet)
    self.vsd.get_subnet_dhcpoptions(
        filter='externalID == "{}"'.format(subnet_externalID))
    """
    if not isinstance(subnet, self.vspk.NUSubnet):
        if not filter:
            LOG.error('a filter is required')
            return None
        subnet = self.session.user.subnets.get_first(filter=filter)
        # Guard: get_first returns None when nothing matches; without
        # this check the dhcp_options access below would raise
        # AttributeError instead of reporting the failed lookup.
        if not subnet:
            LOG.error('could not fetch the subnet matching the '
                      'filter "{}"'.format(filter))
            return None
    dhcp_options = subnet.dhcp_options.get()
    if not dhcp_options:
        if filter:
            LOG.error('could not fetch the dhcp options on the subnet '
                      'matching the filter "{}"'
                      .format(filter))
        else:
            LOG.error('could not fetch the dhcp options on the subnet')
    return dhcp_options
|
||||
|
||||
@recreate_session_on_timeout
def get_vport(self, subnet, filter):
    """Fetch a vport from the given subnet.

    :param subnet: NUSubnet object to search in (required)
    :param filter: vspk filter expression (required),
                   e.g. 'externalID == "{}"'.format(ext_id)
    :return: the first matching vport object, or None
    @Example:
    self.vsd.get_vport(subnet=subnet,
                       filter='externalID == "{}"'.format(ext_id))
    """
    # Both arguments are mandatory; bail out early when either is missing.
    if not isinstance(subnet, self.vspk.NUSubnet):
        LOG.error('a subnet is required')
        return None
    if not filter:
        LOG.error('a filter is required')
        return None
    match = subnet.vports.get_first(filter=filter)
    if not match:
        LOG.error('could not fetch the vport from the subnet '
                  'matching the filter "{}"'.format(filter))
    return match
|
||||
|
||||
@recreate_session_on_timeout
def get_vm_interface(self, filter):
    """Fetch a vm interface matching the given filter.

    :param filter: vspk filter expression (required),
                   e.g. 'externalID == "{}"'.format(ext_id)
    :return: the first matching vm interface object, or None
    @Example:
    self.vsd.get_vm_interface(
        filter='externalID == "{}"'.format(ext_id))
    """
    if not filter:
        LOG.error('a filter is required')
        return None
    iface = self.session.user.vm_interfaces.get_first(filter=filter)
    if not iface:
        LOG.error('could not fetch the vm interface '
                  'matching the filter "{}"'.format(filter))
    return iface
|
||||
|
||||
@recreate_session_on_timeout
def get_vm_interface_policydecisions(self, vm_interface=None, filter=None):
    """Fetch the policy decisions attached to a vm interface.

    :param vm_interface: vm interface object; when it is not a
                         NUVMInterface, it is looked up via the filter
    :param filter: vspk filter expression used to locate the
                   vm interface
    :return: policy decision object, or None when the interface lookup
             fails
    @Example:
    self.vsd.get_vm_interface_policydecisions(vm_interface=interface)
    self.vsd.get_vm_interface_policydecisions(
        filter='externalID == "{}"'.format(vm_interface_externalID))
    """
    if not isinstance(vm_interface, self.vspk.NUVMInterface):
        if not filter:
            LOG.error('a filter is required')
            return None
        vm_interface = \
            self.session.user.vm_interfaces.get_first(filter=filter)
        # Guard: get_first returns None when nothing matches; without
        # this check the policy_decision_id access below would raise
        # AttributeError instead of reporting the failed lookup.
        if not vm_interface:
            LOG.error('could not fetch the vm interface '
                      'matching the filter "{}"'.format(filter))
            return None
    policy_decisions = self.vspk.NUPolicyDecision(
        id=vm_interface.policy_decision_id).fetch()
    if not policy_decisions:
        if filter:
            LOG.error('could not fetch the policy decisions on the '
                      'vm interface matching the filter "{}"'
                      .format(filter))
        else:
            LOG.error('could not fetch the policy decisions '
                      'on the vm interface')
    return policy_decisions
|
||||
|
||||
@recreate_session_on_timeout
def get_vm_interface_dhcpoptions(self, vm_interface=None, filter=None):
    """Fetch the DHCP options defined on a vm interface.

    :param vm_interface: vm interface object; when it is not a
                         NUVMInterface, it is looked up via the filter
    :param filter: vspk filter expression used to locate the
                   vm interface
    :return: the interface's dhcp options, or None when the interface
             lookup fails
    @Example:
    self.vsd.get_vm_interface_dhcpoptions(vm_interface=vm_interface)
    self.vsd.get_vm_interface_dhcpoptions(
        filter='externalID == "{}"'.format(vm_interface_externalID))
    """
    if not isinstance(vm_interface, self.vspk.NUVMInterface):
        if not filter:
            LOG.error('a filter is required')
            return None
        vm_interface = self.session.user.vm_interfaces.get_first(
            filter=filter)
        # Guard: get_first returns None when nothing matches; without
        # this check the dhcp_options access below would raise
        # AttributeError instead of reporting the failed lookup.
        if not vm_interface:
            LOG.error('could not fetch the vm interface '
                      'matching the filter "{}"'.format(filter))
            return None
    dhcp_options = vm_interface.dhcp_options.get()
    if not dhcp_options:
        if filter:
            LOG.error('could not fetch the dhcp options on the '
                      'vm interface matching the filter "{}"'
                      .format(filter))
        else:
            LOG.error('could not fetch the dhcp options on the '
                      'vm interface')
    return dhcp_options
|
||||
|
||||
@recreate_session_on_timeout
def get_ingress_acl_entry(self, filter):
    """Fetch an ingress acl entry matching the given filter.

    :param filter: vspk filter expression (required),
                   e.g. 'externalID == "{}"'.format(ext_id)
    :return: the first matching ingress acl entry object, or None
    @Example:
    self.vsd.get_ingress_acl_entry(
        filter='externalID == "{}"'.format(ext_id))
    """
    if not filter:
        LOG.error('a filter is required')
        return None
    entry = self.session.user.ingress_acl_entry_templates.get_first(
        filter=filter)
    if not entry:
        LOG.error('could not fetch the ingress acl entry '
                  'matching the filter "{}"'.format(filter))
    return entry
|
||||
|
||||
@recreate_session_on_timeout
def get_egress_acl_entry(self, filter):
    """Fetch an egress acl entry matching the given filter.

    :param filter: vspk filter expression (required),
                   e.g. 'externalID == "{}"'.format(ext_id)
    :return: the first matching egress acl entry object, or None
    @Example:
    self.vsd.get_egress_acl_entry(
        filter='externalID == "{}"'.format(ext_id))
    """
    if not filter:
        LOG.error('a filter is required')
        return None
    entry = self.session.user.egress_acl_entry_templates.get_first(
        filter=filter)
    if not entry:
        LOG.error('could not fetch the egress acl entry '
                  'matching the filter "{}"'.format(filter))
    return entry
|
||||
|
||||
@recreate_session_on_timeout
def get_qoss(self, vport):
    """Fetch the QoS objects attached to the given vport.

    :param vport: NUVPort object (required)
    :return: the vport's qoss, or None when no vport is given
    @Example:
    self.vsd.get_qoss(vport=vport)
    """
    if isinstance(vport, self.vspk.NUVPort):
        qos_list = vport.qoss.get()
        if not qos_list:
            LOG.error('could not fetch the qoss from the vport')
        return qos_list
    LOG.error('a vport is required')
    return None
|
||||
|
||||
@recreate_session_on_timeout
def get_floating_ip(self, filter):
    """Fetch a floating ip matching the given filter.

    :param filter: vspk filter expression (required),
                   e.g. 'externalID == "{}"'.format(ext_id)
    :return: the first matching floating ip object, or None
    @Example:
    self.vsd.get_floating_ip(
        filter='externalID == "{}"'.format(ext_id))
    """
    if not filter:
        LOG.error('a filter is required')
        return None
    fip = self.session.user.floating_ips.get_first(filter=filter)
    if not fip:
        LOG.error('could not fetch the floating ip '
                  'matching the filter "{}"'.format(filter))
    return fip
|
||||
|
||||
@recreate_session_on_timeout
def get_ingress_acl_entries(self, filter):
    """Fetch ingress acl entries via their parent acl templates.

    Looks up the ingress acl templates matching the filter, then
    collects the entry templates of each.

    :param filter: vspk filter expression (required) applied to the
                   ingress acl *templates*,
                   e.g. 'externalID == "{}"'.format(ext_id)
    :return: list with one element per matching template, or None when
             the filter is missing or no template matches
    @Example:
    self.vsd.get_ingress_acl_entries(
        filter='externalID == "{}"'.format(ext_id))
    """
    if not filter:
        LOG.error('a filter is required')
        return None
    templates = self.session.user.ingress_acl_templates.get(filter=filter)
    if not templates:
        LOG.error('could not fetch the ingress acl entries (templates) '
                  'matching the filter "{}"'
                  .format(filter))
        return None
    acls = []
    for template in templates:
        # Re-instantiate by id so the entry fetcher is available.
        tmp = self.vspk.NUIngressACLTemplate(id=template.id)
        acl = tmp.ingress_acl_entry_templates.get()
        # NOTE(review): append (not extend) nests each template's
        # entry collection as one element of the result — presumably
        # intentional, but verify against callers.
        acls.append(acl)
    return acls
|
||||
|
||||
@recreate_session_on_timeout
def get_egress_acl_entries(self, filter):
    """Fetch egress acl entries via their parent acl templates.

    Looks up the egress acl templates matching the filter, then
    collects the entry templates of each.

    :param filter: vspk filter expression (required) applied to the
                   egress acl *templates*,
                   e.g. 'externalID == "{}"'.format(ext_id)
    :return: list with one element per matching template, or None when
             the filter is missing or no template matches
    @Example:
    self.vsd.get_egress_acl_entries(
        filter='externalID == "{}"'.format(ext_id))
    """
    if not filter:
        LOG.error('a filter is required')
        return None
    templates = self.session.user.egress_acl_templates.get(filter=filter)
    if not templates:
        LOG.error('could not fetch the egress acl entries (templates) '
                  'matching the filter "{}"'
                  .format(filter))
        return None
    acls = []
    for template in templates:
        # Re-instantiate by id so the entry fetcher is available.
        tmp = self.vspk.NUEgressACLTemplate(id=template.id)
        acl = tmp.egress_acl_entry_templates.get()
        # NOTE(review): append (not extend) nests each template's
        # entry collection as one element of the result — presumably
        # intentional, but verify against callers.
        acls.append(acl)
    return acls
|
||||
|
||||
@recreate_session_on_timeout
def get_shared_network_resource(self, filter):
    """Fetch a shared network resource matching the given filter.

    :param filter: vspk filter expression (required),
                   e.g. 'externalID == "{}"'.format(ext_id)
    :return: the first matching shared network resource object, or None
    @Example:
    self.vsd.get_shared_network_resource(
        filter='externalID == "{}"'.format(ext_id))
    """
    if not filter:
        LOG.error('a filter is required')
        return None
    resource = self.session.user.shared_network_resources.get_first(
        filter=filter)
    if not resource:
        LOG.error('could not fetch the shared network resource '
                  'matching the filter "{}"'.format(filter))
    return resource
|
||||
|
||||
@recreate_session_on_timeout
def get_virtualip(self, vport, filter):
    """Fetch a virtualip from the given vport.

    :param vport: NUVPort object to search in (required)
    :param filter: vspk filter expression (required),
                   e.g. 'externalID == "{}"'.format(ext_id)
    :return: the first matching virtualip object, or None
    @Example:
    self.vsd.get_virtualip(vport=vport,
                           filter='externalID == "{}"'.format(ext_id))
    """
    # Both arguments are mandatory; bail out early when either is missing.
    if not isinstance(vport, self.vspk.NUVPort):
        LOG.error('a vport is required')
        return None
    if not filter:
        LOG.error('a filter is required')
        return None
    vip = vport.virtual_ips.get_first(filter=filter)
    if not vip:
        LOG.error('could not fetch the virtualip matching the filter "{}"'
                  .format(filter))
    return vip
|
||||
|
|
@ -48,6 +48,10 @@ from marvin.cloudstackAPI import (restartVPC,
|
|||
enableNuageUnderlayVlanIpRange,
|
||||
disableNuageUnderlayVlanIpRange,
|
||||
listNuageUnderlayVlanIpRanges)
|
||||
|
||||
from nuage_test_data import nuage_test_data
|
||||
from nuage_vsp_statistics import VsdDataCollector
|
||||
|
||||
# Import System Modules
|
||||
from retry import retry
|
||||
import importlib
|
||||
|
|
@ -56,10 +60,12 @@ import logging
|
|||
import socket
|
||||
import time
|
||||
import sys
|
||||
from nuage_vsp_statistics import VsdDataCollector
|
||||
|
||||
|
||||
class needscleanup(object):
|
||||
"""
|
||||
Decorator to add the returned object automatically to the cleanup list.
|
||||
"""
|
||||
def __init__(self, method):
|
||||
self.method = method
|
||||
|
||||
|
|
@ -84,6 +90,9 @@ class needscleanup(object):
|
|||
|
||||
|
||||
class gherkin(object):
|
||||
"""Decorator to mark a method as Gherkin style.
|
||||
Add extra colored logging
|
||||
"""
|
||||
BLACK = "\033[0;30m"
|
||||
BLUE = "\033[0;34m"
|
||||
GREEN = "\033[0;32m"
|
||||
|
|
@ -127,6 +136,7 @@ class nuageTestCase(cloudstackTestCase):
|
|||
cls.api_client = cls.test_client.getApiClient()
|
||||
cls.db_client = cls.test_client.getDbConnection()
|
||||
cls.test_data = cls.test_client.getParsedTestDataConfig()
|
||||
cls.test_data.update(nuage_test_data)
|
||||
|
||||
# Get Zones and Domains
|
||||
cls.zones = Zone.list(cls.api_client)
|
||||
|
|
@ -150,7 +160,7 @@ class nuageTestCase(cloudstackTestCase):
|
|||
|
||||
@classmethod
|
||||
def getZoneDetails(cls, zone=None):
|
||||
# Get Zone details
|
||||
"""Get Zone details"""
|
||||
cls.zone = zone if zone else get_zone(
|
||||
cls.api_client,
|
||||
zone_name=cls.test_client.getZoneForTests()
|
||||
|
|
@ -275,12 +285,13 @@ class nuageTestCase(cloudstackTestCase):
|
|||
log_handler.setFormatter(formatter)
|
||||
root.addHandler(log_handler)
|
||||
vsd_info = cls.nuage_vsp_device.__dict__
|
||||
|
||||
cls.debug("Nuage VSP device (VSD) details - %s" % vsd_info)
|
||||
vsd_api_client = ApiClient(
|
||||
address=vsd_info["hostname"],
|
||||
user=vsd_info["username"],
|
||||
password=vsd_info["password"],
|
||||
version=vsd_info["apiversion"][1] + "." + vsd_info["apiversion"][3]
|
||||
address=cls.nuage_vsp_device.hostname,
|
||||
user=cls.nuage_vsp_device.username,
|
||||
password=cls.nuage_vsp_device.password,
|
||||
version=cls.nuage_vsp_device.apiversion[1] + "." + cls.nuage_vsp_device.apiversion[3]
|
||||
)
|
||||
vsd_api_client.new_session()
|
||||
cls.vsd = VSDHelpers(vsd_api_client)
|
||||
|
|
@ -293,7 +304,7 @@ class nuageTestCase(cloudstackTestCase):
|
|||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
# Cleanup resources used
|
||||
cls.debug("Cleaning up the resources")
|
||||
cls.debug("Cleaning up the class resources")
|
||||
for obj in reversed(cls._cleanup):
|
||||
try:
|
||||
if isinstance(obj, VirtualMachine):
|
||||
|
|
@ -304,12 +315,12 @@ class nuageTestCase(cloudstackTestCase):
|
|||
cls.error("Failed to cleanup %s, got %s" % (obj, e))
|
||||
# cleanup_resources(cls.api_client, cls._cleanup)
|
||||
cls._cleanup = []
|
||||
cls.debug("Cleanup complete!")
|
||||
cls.debug("Cleanup class complete!")
|
||||
return
|
||||
|
||||
def tearDown(self):
|
||||
# Cleanup resources used
|
||||
self.debug("Cleaning up the resources")
|
||||
self.debug("Cleaning up the test resources")
|
||||
for obj in reversed(self.cleanup):
|
||||
try:
|
||||
if isinstance(obj, VirtualMachine):
|
||||
|
|
@ -322,20 +333,28 @@ class nuageTestCase(cloudstackTestCase):
|
|||
self.error("Failed to cleanup %s, got %s" % (obj, e))
|
||||
# cleanup_resources(self.api_client, self.cleanup)
|
||||
self.cleanup = []
|
||||
self.debug("Cleanup complete!")
|
||||
self.debug("Cleanup test complete!")
|
||||
return
|
||||
|
||||
# enable_NuageUnderlayPublicIpRange - Enables/configures underlay
|
||||
# networking for the given public IP range in Nuage VSP
|
||||
def enable_NuageUnderlayPublicIpRange(self, vlanid):
|
||||
"""Enables/configures underlay networking
|
||||
for the given public IP range in Nuage VSP
|
||||
|
||||
:param vlanid: Vlan ID
|
||||
:type vlanid: marvin.lib.base.PublicIpRange.vlan
|
||||
"""
|
||||
cmd = enableNuageUnderlayVlanIpRange. \
|
||||
enableNuageUnderlayVlanIpRangeCmd()
|
||||
cmd.id = vlanid
|
||||
self.api_client.enableNuageUnderlayVlanIpRange(cmd)
|
||||
|
||||
# disable_NuageUnderlayPublicIpRange - Disables/de-configures underlay
|
||||
# networking for the given public IP range in Nuage VSP
|
||||
def disable_NuageUnderlayPublicIpRange(self, public_ip_range):
|
||||
"""Disables underlay networking
|
||||
for the given public IP range in Nuage VSP
|
||||
|
||||
:param public_ip_range: Public IP range
|
||||
:type public_ip_range: marvin.lib.base.PublicIpRange
|
||||
"""
|
||||
cmd = disableNuageUnderlayVlanIpRange. \
|
||||
disableNuageUnderlayVlanIpRangeCmd()
|
||||
cmd.id = public_ip_range.vlan.id
|
||||
|
|
@ -344,6 +363,11 @@ class nuageTestCase(cloudstackTestCase):
|
|||
# list_NuageUnderlayPublicIpRanges - Lists underlay networking
|
||||
# enabled/configured public IP ranges in Nuage VSP
|
||||
def list_NuageUnderlayPublicIpRanges(self, public_ip_range=None):
|
||||
"""Lists Vlan IP ranges that have the nuage underlay flag set to True
|
||||
|
||||
:param public_ip_range: Optionally filter by Public IP range
|
||||
:type public_ip_range: marvin.lib.base.PublicIpRange
|
||||
"""
|
||||
cmd = listNuageUnderlayVlanIpRanges.listNuageUnderlayVlanIpRangesCmd()
|
||||
if public_ip_range:
|
||||
cmd.id = public_ip_range.vlan.id
|
||||
|
|
@ -353,6 +377,7 @@ class nuageTestCase(cloudstackTestCase):
|
|||
# create_VpcOffering - Creates VPC offering
|
||||
@needscleanup
|
||||
def create_VpcOffering(cls, vpc_offering, suffix=None):
|
||||
"""Creates VPC offering"""
|
||||
cls.debug("Creating VPC offering")
|
||||
if suffix:
|
||||
vpc_offering["name"] = "VPC_OFF-" + str(suffix)
|
||||
|
|
@ -368,6 +393,16 @@ class nuageTestCase(cloudstackTestCase):
|
|||
@needscleanup
|
||||
def create_Vpc(cls, vpc_offering, cidr='10.1.0.0/16', testdata=None,
|
||||
account=None, networkDomain=None):
|
||||
"""Creates VPC with the given VPC offering
|
||||
:param vpc_offering: vpc offering
|
||||
:type vpc_offering: VpcOffering
|
||||
:param cidr: CIDR
|
||||
:param testdata: vpc details
|
||||
:param account: Account which will be the owner.
|
||||
:param networkDomain:
|
||||
:return: created VPC
|
||||
:rtype: VPC
|
||||
"""
|
||||
if not account:
|
||||
account = cls.account
|
||||
cls.debug("Creating a VPC in the account - %s" % account.name)
|
||||
|
|
@ -389,6 +424,12 @@ class nuageTestCase(cloudstackTestCase):
|
|||
|
||||
# restart_Vpc - Restarts the given VPC with/without cleanup
|
||||
def restart_Vpc(self, vpc, cleanup=False):
|
||||
"""Restarts the given VPC with/without cleanup
|
||||
:param vpc: vpc to restart
|
||||
:type vpc: VPC
|
||||
:param cleanup: whether to restart with cleanup
|
||||
:type cleanup: bool
|
||||
"""
|
||||
self.debug("Restarting VPC with ID - %s" % vpc.id)
|
||||
cmd = restartVPC.restartVPCCmd()
|
||||
cmd.id = vpc.id
|
||||
|
|
@ -401,6 +442,14 @@ class nuageTestCase(cloudstackTestCase):
|
|||
@needscleanup
|
||||
def create_NetworkOffering(cls, net_offering, suffix=None,
|
||||
conserve_mode=False):
|
||||
"""Creates a Network Offering
|
||||
:param net_offering: offering details
|
||||
:type net_offering: object
|
||||
:param suffix: string to append to the offering name
|
||||
:param conserve_mode:
|
||||
:return: created Network Offering
|
||||
:rtype: NetworkOffering
|
||||
"""
|
||||
cls.debug("Creating Network offering")
|
||||
if suffix:
|
||||
net_offering["name"] = "NET_OFF-" + str(suffix)
|
||||
|
|
@ -418,6 +467,23 @@ class nuageTestCase(cloudstackTestCase):
|
|||
def create_Network(cls, nw_off, gateway="10.1.1.1",
|
||||
netmask="255.255.255.0", vpc=None, acl_list=None,
|
||||
testdata=None, account=None, vlan=None, externalid=None):
|
||||
"""Creates Network with the given Network offering
|
||||
:param nw_off: Network offering
|
||||
:type nw_off: NetworkOffering
|
||||
:param gateway: gateway
|
||||
:param netmask: netmask
|
||||
:param vpc: in case of a VPC tier, the parent VPC
|
||||
:type vpc: VPC
|
||||
:param acl_list: in case of a VPC tier, the acl list
|
||||
:type acl_list: NetworkACLList
|
||||
:param testdata: Network details
|
||||
:param account: Account which will be the owner.
|
||||
:param vlan: vlan id
|
||||
:param externalid: external id, in case of VSD managed networks
|
||||
|
||||
:return: created Network
|
||||
:rtype: Network
|
||||
"""
|
||||
if not account:
|
||||
account = cls.account
|
||||
cls.debug("Creating a network in the account - %s" % account.name)
|
||||
|
|
@ -669,6 +735,10 @@ class nuageTestCase(cloudstackTestCase):
|
|||
|
||||
# ssh_into_VM - Gets into the shell of the given VM using its public IP
|
||||
def ssh_into_VM(self, vm, public_ip, reconnect=True, negative_test=False):
|
||||
if self.isSimulator:
|
||||
self.debug("Simulator Environment: Skipping ssh into VM")
|
||||
return
|
||||
|
||||
self.debug("SSH into VM with ID - %s on public IP address - %s" %
|
||||
(vm.id, public_ip.ipaddress.ipaddress))
|
||||
tries = 1 if negative_test else 3
|
||||
|
|
@ -687,8 +757,15 @@ class nuageTestCase(cloudstackTestCase):
|
|||
|
||||
return retry_ssh()
|
||||
|
||||
# execute_cmd - Executes the given command on the given ssh client
|
||||
def execute_cmd(self, ssh_client, cmd):
|
||||
"""Executes the given command on the given ssh client
|
||||
|
||||
:param ssh_client: SSH session to the remote machine
|
||||
:type ssh_client: marvin.SshClient
|
||||
:param cmd: Command to run on the remote machine
|
||||
:type cmd: str
|
||||
:return: command output
|
||||
"""
|
||||
self.debug("SSH client executing command - %s" % cmd)
|
||||
ret_data = ""
|
||||
out_list = ssh_client.execute(cmd)
|
||||
|
|
@ -699,10 +776,18 @@ class nuageTestCase(cloudstackTestCase):
|
|||
self.debug("SSH client executed command result is None")
|
||||
return ret_data
|
||||
|
||||
# wget_from_server - Fetches file with the given file name from a web
|
||||
# server listening on the given public IP address and port
|
||||
|
||||
def wget_from_server(self, public_ip, port=80, file_name="index.html",
|
||||
disable_system_proxies=True):
|
||||
"""Fetches file with the given file name from a web server
|
||||
|
||||
:param public_ip: HTTP server IP
|
||||
:type public_ip: PublicIPAddress
|
||||
:param port: HTTP server port
|
||||
:param file_name: URL path
|
||||
:param disable_system_proxies: whether to bypass system proxy
|
||||
:return: filename, headers
|
||||
"""
|
||||
import urllib
|
||||
if disable_system_proxies:
|
||||
urllib.getproxies = lambda: {}
|
||||
|
|
@ -719,12 +804,15 @@ class nuageTestCase(cloudstackTestCase):
|
|||
(file_name, public_ip.ipaddress.ipaddress, port))
|
||||
return filename, headers
|
||||
|
||||
# validate_NetworkServiceProvider - Validates the given Network Service
|
||||
# Provider in the Nuage VSP Physical Network, matches the given provider
|
||||
# name and state against the list of providers fetched
|
||||
def validate_NetworkServiceProvider(self, provider_name, state=None):
|
||||
"""Validates the Network Service Provider in the Nuage VSP Physical
|
||||
Network"""
|
||||
Network.
|
||||
|
||||
:param provider_name Provider name
|
||||
:param state Expected state
|
||||
:raises AssertionError when provider isn't found,
|
||||
or has an incorrect state.
|
||||
"""
|
||||
self.debug("Validating the creation and state of Network Service "
|
||||
"Provider - %s" % provider_name)
|
||||
providers = NetworkServiceProvider.list(
|
||||
|
|
@ -748,11 +836,19 @@ class nuageTestCase(cloudstackTestCase):
|
|||
self.debug("Successfully validated the creation and state of Network "
|
||||
"Service Provider - %s" % provider_name)
|
||||
|
||||
# validate_VpcOffering - Validates the given VPC offering, matches the
|
||||
# given VPC offering name and state against the list of VPC offerings
|
||||
# fetched
|
||||
def validate_VpcOffering(self, vpc_offering, state=None):
|
||||
"""Validates the VPC offering"""
|
||||
"""Validates the VPC offering
|
||||
|
||||
Fetches the Vpc offering by id,
|
||||
verifies that the name is correct,
|
||||
and if en expected state is given, verifies that it is correct.
|
||||
|
||||
:param vpc_offering: cs object
|
||||
:type vpc_offering: VpcOffering
|
||||
:param state: optional state
|
||||
:raise AssertionError when VPC offering isn't found,
|
||||
or has an incorrect state.
|
||||
"""
|
||||
self.debug("Validating the creation and state of VPC offering - %s" %
|
||||
vpc_offering.name)
|
||||
vpc_offs = VpcOffering.list(self.api_client,
|
||||
|
|
@ -772,10 +868,18 @@ class nuageTestCase(cloudstackTestCase):
|
|||
self.debug("Successfully validated the creation and state of VPC "
|
||||
"offering - %s" % vpc_offering.name)
|
||||
|
||||
# validate_Vpc - Validates the given VPC, matches the given VPC name and
|
||||
# state against the list of VPCs fetched
|
||||
def validate_Vpc(self, vpc, state=None):
|
||||
"""Validates the VPC"""
|
||||
"""Validates the VPC
|
||||
|
||||
Fetches the vpc by id,
|
||||
verifies that the name is correct,
|
||||
and if en expected state is given, verifies that it is correct.
|
||||
|
||||
:param vpc: cs object
|
||||
:type vpc: Vpc
|
||||
:param state: optional state
|
||||
:raise AssertionError when vpc isn't found,
|
||||
or has an incorrect state."""
|
||||
self.debug("Validating the creation and state of VPC - %s" % vpc.name)
|
||||
vpcs = VPC.list(self.api_client,
|
||||
id=vpc.id
|
||||
|
|
@ -794,11 +898,19 @@ class nuageTestCase(cloudstackTestCase):
|
|||
self.debug("Successfully validated the creation and state of VPC - %s"
|
||||
% vpc.name)
|
||||
|
||||
# validate_NetworkOffering - Validates the given Network offering, matches
|
||||
# the given network offering name and state against the list of network
|
||||
# offerings fetched
|
||||
def validate_NetworkOffering(self, net_offering, state=None):
|
||||
"""Validates the Network offering"""
|
||||
"""Validates the Network offering
|
||||
|
||||
Fetches the Network offering by id,
|
||||
verifies that the name is correct,
|
||||
and if en expected state is given, verifies that it is correct.
|
||||
|
||||
:param net_offering: cs object
|
||||
:type net_offering: NetworkOffering
|
||||
:param state: optional state
|
||||
:raise AssertionError when network offering isn't found,
|
||||
or has an incorrect state."""
|
||||
|
||||
self.debug("Validating the creation and state of Network offering - %s"
|
||||
% net_offering.name)
|
||||
net_offs = NetworkOffering.list(self.api_client,
|
||||
|
|
@ -818,10 +930,18 @@ class nuageTestCase(cloudstackTestCase):
|
|||
self.debug("Successfully validated the creation and state of Network "
|
||||
"offering - %s" % net_offering.name)
|
||||
|
||||
# validate_Network - Validates the given network, matches the given network
|
||||
# name and state against the list of networks fetched
|
||||
def validate_Network(self, network, state=None):
|
||||
"""Validates the network"""
|
||||
"""Validates the network
|
||||
|
||||
Fetches the Network by id,
|
||||
verifies that the name is correct,
|
||||
and if en expected state is given, verifies that it is correct.
|
||||
|
||||
:param network: cs object
|
||||
:type network: Network
|
||||
:param state: optional state
|
||||
:raise AssertionError when network isn't found,
|
||||
or has an incorrect state."""
|
||||
self.debug("Validating the creation and state of Network - %s" %
|
||||
network.name)
|
||||
networks = Network.list(self.api_client,
|
||||
|
|
@ -841,10 +961,14 @@ class nuageTestCase(cloudstackTestCase):
|
|||
self.debug("Successfully validated the creation and state of Network "
|
||||
"- %s" % network.name)
|
||||
|
||||
# check_VM_state - Checks if the given VM is in the expected state form the
|
||||
# list of fetched VMs
|
||||
def check_VM_state(self, vm, state=None):
|
||||
"""Validates the VM state"""
|
||||
"""Validates the VM state
|
||||
:param vm: cs object
|
||||
:type vm: VirtualMachine
|
||||
:param state: optional state
|
||||
:raise AssertionError when vm isn't found,
|
||||
or has an incorrect state."""
|
||||
|
||||
self.debug("Validating the deployment and state of VM - %s" % vm.name)
|
||||
vms = VirtualMachine.list(self.api_client,
|
||||
id=vm.id,
|
||||
|
|
@ -860,10 +984,14 @@ class nuageTestCase(cloudstackTestCase):
|
|||
self.debug("Successfully validated the deployment and state of VM - %s"
|
||||
% vm.name)
|
||||
|
||||
# check_Router_state - Checks if the given router is in the expected state
|
||||
# form the list of fetched routers
|
||||
def check_Router_state(self, router, state=None):
|
||||
"""Validates the Router state"""
|
||||
"""Validates the Router state
|
||||
:param router: cs object
|
||||
:type router: Router
|
||||
:param state: optional state
|
||||
:raise AssertionError when router isn't found,
|
||||
or has an incorrect state."""
|
||||
|
||||
self.debug("Validating the deployment and state of Router - %s" %
|
||||
router.name)
|
||||
routers = Router.list(self.api_client,
|
||||
|
|
@ -880,11 +1008,20 @@ class nuageTestCase(cloudstackTestCase):
|
|||
self.debug("Successfully validated the deployment and state of Router "
|
||||
"- %s" % router.name)
|
||||
|
||||
# validate_PublicIPAddress - Validates if the given public IP address is in
|
||||
# the expected state form the list of fetched public IP addresses
|
||||
def validate_PublicIPAddress(self, public_ip, network, static_nat=False,
|
||||
vm=None):
|
||||
"""Validates the Public IP Address"""
|
||||
"""Validates the Public IP Address
|
||||
:param public_ip: cs object
|
||||
:type public_ip: PublicIPAddress
|
||||
:param network: cs object
|
||||
:type network: Network
|
||||
:param static_nat: optional state
|
||||
:type static_nat: bool
|
||||
:param vm: Virtual machine the public ip should be forwarding to.
|
||||
:type vm: VirtualMachine
|
||||
:raise AssertionError when Public IP isn't found, isn't Allocated
|
||||
or has an incorrect ip address."""
|
||||
|
||||
self.debug("Validating the assignment and state of public IP address "
|
||||
"- %s" % public_ip.ipaddress.ipaddress)
|
||||
public_ips = PublicIPAddress.list(self.api_client,
|
||||
|
|
@ -913,10 +1050,14 @@ class nuageTestCase(cloudstackTestCase):
|
|||
self.debug("Successfully validated the assignment and state of public "
|
||||
"IP address - %s" % public_ip.ipaddress.ipaddress)
|
||||
|
||||
# verify_VRWithoutPublicIPNIC - Verifies that the given Virtual Router has
|
||||
# no public IP and NIC
|
||||
def verify_VRWithoutPublicIPNIC(self, vr):
|
||||
"""Verifies VR without Public IP and NIC"""
|
||||
"""Verifies that the given Virtual Router has no public IP nor NIC
|
||||
:param vr: cs object
|
||||
:type vr: Router
|
||||
:raise AssertionError when router isn't found,
|
||||
has an incorrect name, has a public ip for source nat
|
||||
or has a nic in the public network."""
|
||||
|
||||
self.debug("Verifies that there is no public IP and NIC in Virtual "
|
||||
"Router - %s" % vr.name)
|
||||
self.assertEqual(vr.publicip, None,
|
||||
|
|
@ -930,6 +1071,11 @@ class nuageTestCase(cloudstackTestCase):
|
|||
"in Virtual Router - %s" % vr.name)
|
||||
|
||||
def verify_vpc_has_no_src_nat(self, vpc, account=None):
|
||||
"""Verifies that the given Vpc has no public IP nor NIC
|
||||
:param vpc: cs object
|
||||
:type vpc: VPC
|
||||
:raise AssertionError when the VPC has a public ip for source nat.
|
||||
"""
|
||||
if not account:
|
||||
account = self.account
|
||||
self.debug("Verify that there is no src NAT ip address "
|
||||
|
|
@ -944,9 +1090,14 @@ class nuageTestCase(cloudstackTestCase):
|
|||
# VSD verifications; VSD is a programmable policy and analytics engine of
|
||||
# Nuage VSP SDN platform
|
||||
|
||||
# get_externalID_filter - Returns corresponding external ID filter of the
|
||||
# given object in VSD
|
||||
def get_externalID_filter(self, object_id):
|
||||
"""Builds a VSD filter to search by external ID
|
||||
|
||||
:param object_id: Cloudstack UUID
|
||||
:type object_id: str
|
||||
:rtype: str
|
||||
:return: filter
|
||||
"""
|
||||
ext_id = object_id + "@" + self.cms_id
|
||||
return self.vsd.set_externalID_filter(ext_id)
|
||||
|
||||
|
|
@ -963,8 +1114,28 @@ class nuageTestCase(cloudstackTestCase):
|
|||
:param cs_objects: Cloudstack objects to take the UUID from.
|
||||
:return: the VSPK object having the correct externalID
|
||||
"""
|
||||
return fetcher.get_first(filter="externalID BEGINSWITH '%s'" %
|
||||
":".join([o.id for o in cs_objects]))
|
||||
object_id = ":".join([o.id for o in cs_objects])
|
||||
ext_id = object_id + "@" + self.cms_id
|
||||
return fetcher.get_first(filter="externalID is '%s'" % ext_id)
|
||||
|
||||
def fetch_vsd_objects(self, domain_id, network, vpc=None):
|
||||
vsd_enterprise = self.vsd.get_enterprise(
|
||||
filter=self.get_externalID_filter(domain_id))
|
||||
|
||||
ext_network_filter = self.get_externalID_filter(vpc.id) if vpc \
|
||||
else self.get_externalID_filter(network.id)
|
||||
vsd_domain = self.vsd.get_domain(filter=ext_network_filter)
|
||||
vsd_zone = self.vsd.get_zone(filter=ext_network_filter)
|
||||
vsd_subnet = self.vsd.get_subnet(
|
||||
filter=self.get_externalID_filter(network.id))
|
||||
|
||||
return [
|
||||
ext_network_filter,
|
||||
vsd_enterprise,
|
||||
vsd_domain,
|
||||
vsd_zone,
|
||||
vsd_subnet
|
||||
]
|
||||
|
||||
# verify_vsd_network - Verifies the given CloudStack domain and network/VPC
|
||||
# against the corresponding installed enterprise, domain, zone, and subnet
|
||||
|
|
@ -973,14 +1144,15 @@ class nuageTestCase(cloudstackTestCase):
|
|||
domain_template_name=None):
|
||||
self.debug("Verifying the creation and state of Network - %s in VSD" %
|
||||
network.name)
|
||||
vsd_enterprise = self.vsd.get_enterprise(
|
||||
filter=self.get_externalID_filter(domain_id))
|
||||
ext_network_filter = self.get_externalID_filter(vpc.id) if vpc \
|
||||
else self.get_externalID_filter(network.id)
|
||||
vsd_domain = self.vsd.get_domain(filter=ext_network_filter)
|
||||
vsd_zone = self.vsd.get_zone(filter=ext_network_filter)
|
||||
vsd_subnet = self.vsd.get_subnet(
|
||||
filter=self.get_externalID_filter(network.id))
|
||||
|
||||
[
|
||||
ext_network_filter,
|
||||
vsd_enterprise,
|
||||
vsd_domain,
|
||||
vsd_zone,
|
||||
vsd_subnet
|
||||
] = self.fetch_vsd_objects(domain_id, network, vpc)
|
||||
|
||||
self.assertEqual(vsd_enterprise.name, domain_id,
|
||||
"VSD enterprise name should match CloudStack domain "
|
||||
"uuid"
|
||||
|
|
@ -1039,14 +1211,23 @@ class nuageTestCase(cloudstackTestCase):
|
|||
filter=self.get_externalID_filter(network.id))
|
||||
self.assertEqual(vsd_subnet, None, "Network is present on the vsd.")
|
||||
|
||||
# get_subnet_id - Calculates and returns the subnet ID in VSD with the
|
||||
# given CloudStack network ID and subnet gateway
|
||||
def get_subnet_id(self, network_id, gateway):
|
||||
""" Calculates the subnet ID in VSD with
|
||||
the given CloudStack network ID and subnet gateway
|
||||
|
||||
:param gateway: Gateway
|
||||
:type gateway: str
|
||||
:type network_id: str
|
||||
:rtype: str
|
||||
:return: Expected Subnet UUID
|
||||
|
||||
"""
|
||||
try:
|
||||
import uuid
|
||||
|
||||
class NULL_NAMESPACE:
|
||||
bytes = b''
|
||||
|
||||
# The UUID of the shared network in ACS
|
||||
# The gateway IP of the address range
|
||||
network_id = str(network_id)
|
||||
|
|
@ -1059,11 +1240,12 @@ class nuageTestCase(cloudstackTestCase):
|
|||
self.debug("Failed to get the subnet id due to %s" % e)
|
||||
self.fail("Unable to get the subnet id, failing the test case")
|
||||
|
||||
# verify_vsd_shared_network - Verifies the given CloudStack domain and
|
||||
# shared network against the corresponding installed enterprise, domain,
|
||||
# zone, subnet, and shared network resource in VSD
|
||||
|
||||
def verify_vsd_shared_network(self, domain_id, network,
|
||||
gateway="10.1.1.1"):
|
||||
"""Verifies the given CloudStack domain and
|
||||
shared network against the corresponding installed enterprise,
|
||||
domain, zone, subnet, and shared network resource in VSD"""
|
||||
self.debug("Verifying the creation and state of Shared Network - %s "
|
||||
"in VSD" % network.name)
|
||||
vsd_enterprise = self.vsd.get_enterprise(
|
||||
|
|
@ -1074,6 +1256,7 @@ class nuageTestCase(cloudstackTestCase):
|
|||
subnet_id = self.get_subnet_id(network.id, gateway)
|
||||
vsd_subnet = self.vsd.get_subnet(
|
||||
filter=self.get_externalID_filter(subnet_id))
|
||||
|
||||
self.assertNotEqual(vsd_enterprise, None,
|
||||
"VSD enterprise (CloudStack domain) data format "
|
||||
"should not be of type None"
|
||||
|
|
@ -1099,9 +1282,13 @@ class nuageTestCase(cloudstackTestCase):
|
|||
self.debug("Successfully verified the creation and state of Shared "
|
||||
"Network - %s in VSD" % network.name)
|
||||
|
||||
# verify_vsd_object_status - Verifies the given CloudStack object status in
|
||||
# VSD
|
||||
def verify_vsd_object_status(self, cs_object, stopped):
|
||||
""" Verifies the VM status in VSD for a given Cloudstack VM,
|
||||
retrying every 5 seconds for 10 minutes.
|
||||
|
||||
:param cs_object: Cloudstack VM
|
||||
:param stopped: boolean: specifying if the vm is stopped.
|
||||
"""
|
||||
vsd_object = self.vsd.get_vm(
|
||||
filter=self.get_externalID_filter(cs_object.id))
|
||||
expected_status = cs_object.state.upper() if not stopped \
|
||||
|
|
@ -1280,6 +1467,7 @@ class nuageTestCase(cloudstackTestCase):
|
|||
public_ipaddress.vlanid)
|
||||
vsd_fip_subnet = self.vsd.get_shared_network_resource(
|
||||
filter=ext_fip_subnet_filter)
|
||||
|
||||
if self.isNuageInfraUnderlay:
|
||||
self.assertEqual(vsd_fip_subnet.underlay, True,
|
||||
"Floating IP subnet in VSD should be underlay "
|
||||
|
|
@ -1290,6 +1478,7 @@ class nuageTestCase(cloudstackTestCase):
|
|||
"Floating IP subnet in VSD should be underlay "
|
||||
"disabled"
|
||||
)
|
||||
|
||||
ext_network_filter = self.get_externalID_filter(vpc.id) if vpc \
|
||||
else self.get_externalID_filter(network.id)
|
||||
vsd_domain = self.vsd.get_domain(filter=ext_network_filter)
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load Diff
|
|
@ -76,153 +76,6 @@ class MySSHKeyPair:
|
|||
apiclient.deleteSSHKeyPair(cmd)
|
||||
|
||||
|
||||
class Services:
|
||||
"""Test Add Remove Network Services
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.services = {
|
||||
"isolated_configdrive_network_offering_withoutdns" : {
|
||||
"name": 'nuage_configdrive_withoutDns_marvin',
|
||||
"displaytext": 'nuage_configdrive_withoutDns_marvin',
|
||||
"guestiptype": 'Isolated',
|
||||
"supportedservices": 'Dhcp,SourceNat,Connectivity,StaticNat,UserData,Firewall',
|
||||
"traffictype": 'GUEST',
|
||||
"availability": 'Optional',
|
||||
"serviceProviderList": {
|
||||
"Dhcp": 'NuageVsp',
|
||||
"StaticNat": 'NuageVsp',
|
||||
"SourceNat": 'NuageVsp',
|
||||
"Firewall": 'NuageVsp',
|
||||
"Connectivity": 'NuageVsp',
|
||||
"UserData": 'ConfigDrive'
|
||||
},
|
||||
"serviceCapabilityList": {
|
||||
"SourceNat": {"SupportedSourceNatTypes": "perzone"}
|
||||
}
|
||||
},
|
||||
"isolated_configdrive_network_offering": {
|
||||
"name": 'nuage_configdrive_marvin',
|
||||
"displaytext": 'nuage_configdrive_marvin',
|
||||
"guestiptype": 'Isolated',
|
||||
"supportedservices": 'Dhcp,SourceNat,Connectivity,StaticNat,UserData,Firewall,Dns',
|
||||
"traffictype": 'GUEST',
|
||||
"availability": 'Optional',
|
||||
"serviceProviderList": {
|
||||
"Dhcp": 'NuageVsp',
|
||||
"StaticNat": 'NuageVsp',
|
||||
"SourceNat": 'NuageVsp',
|
||||
"Firewall": 'NuageVsp',
|
||||
"Connectivity": 'NuageVsp',
|
||||
"UserData": 'ConfigDrive',
|
||||
"Dns": 'VirtualRouter'
|
||||
},
|
||||
"serviceCapabilityList": {
|
||||
"SourceNat": {"SupportedSourceNatTypes": "perzone"}
|
||||
}
|
||||
},
|
||||
"vpc_network_offering_configdrive_withoutdns" : {
|
||||
"name": 'nuage_vpc_marvin_configdrive_withoutdns',
|
||||
"displaytext": 'nuage_vpc_marvin_configdrive_withoutdns',
|
||||
"guestiptype": 'Isolated',
|
||||
"supportedservices": 'Dhcp,StaticNat,SourceNat,NetworkACL,Connectivity,UserData',
|
||||
"traffictype": 'GUEST',
|
||||
"availability": 'Optional',
|
||||
"useVpc": 'on',
|
||||
"ispersistent": 'True',
|
||||
"serviceProviderList": {
|
||||
"Dhcp": "NuageVsp",
|
||||
"StaticNat": "NuageVsp",
|
||||
"SourceNat": "NuageVsp",
|
||||
"NetworkACL": "NuageVsp",
|
||||
"Connectivity": "NuageVsp",
|
||||
"UserData": "ConfigDrive"
|
||||
},
|
||||
"serviceCapabilityList": {
|
||||
"SourceNat": {"SupportedSourceNatTypes": "perzone"}
|
||||
}
|
||||
},
|
||||
"vpc_network_offering_configdrive_withdns" : {
|
||||
"name": 'nuage_vpc_marvin_configdrive_withdns',
|
||||
"displaytext": 'nuage_vpc_marvin_configdrive_withdns',
|
||||
"guestiptype": 'Isolated',
|
||||
"supportedservices": 'Dhcp,StaticNat,SourceNat,NetworkACL,Connectivity,UserData,Dns',
|
||||
"traffictype": 'GUEST',
|
||||
"availability": 'Optional',
|
||||
"useVpc": 'on',
|
||||
"ispersistent": 'True',
|
||||
"serviceProviderList": {
|
||||
"Dhcp": "NuageVsp",
|
||||
"StaticNat": "NuageVsp",
|
||||
"SourceNat": "NuageVsp",
|
||||
"NetworkACL": "NuageVsp",
|
||||
"Connectivity": "NuageVsp",
|
||||
"UserData": "ConfigDrive",
|
||||
"Dns": "VpcVirtualRouter"
|
||||
},
|
||||
"serviceCapabilityList": {
|
||||
"SourceNat": {"SupportedSourceNatTypes": "perzone"}
|
||||
}
|
||||
},
|
||||
"vpc_offering_configdrive_withoutdns" : {
|
||||
"name": 'Nuage VSP VPC offering ConfigDrive',
|
||||
"displaytext": 'Nuage VSP VPC offering ConfigDrive',
|
||||
"supportedservices": 'Dhcp,StaticNat,SourceNat,NetworkACL,Connectivity,UserData',
|
||||
"serviceProviderList": {
|
||||
"Dhcp": "NuageVsp",
|
||||
"StaticNat": "NuageVsp",
|
||||
"SourceNat": "NuageVsp",
|
||||
"NetworkACL": "NuageVsp",
|
||||
"Connectivity": "NuageVsp",
|
||||
"UserData": "ConfigDrive"
|
||||
}
|
||||
},
|
||||
"vpc_offering_configdrive_withdns" :{
|
||||
"name": 'Nuage VSP VPC offering ConfigDrive withVR',
|
||||
"displaytext": 'Nuage VSP VPC offering ConfigDrive withVR',
|
||||
"supportedservices": 'Dhcp,StaticNat,SourceNat,NetworkACL,Connectivity,UserData,Dns',
|
||||
"serviceProviderList": {
|
||||
"Dhcp": "NuageVsp",
|
||||
"StaticNat": "NuageVsp",
|
||||
"SourceNat": "NuageVsp",
|
||||
"NetworkACL": "NuageVsp",
|
||||
"Connectivity": "NuageVsp",
|
||||
"UserData": "ConfigDrive",
|
||||
"Dns": "VpcVirtualRouter"
|
||||
}
|
||||
},
|
||||
"shared_nuage_network_config_drive_offering" : {
|
||||
"name": 'nuage_marvin',
|
||||
"displaytext": 'nuage_marvin',
|
||||
"guestiptype": 'shared',
|
||||
"supportedservices": 'Dhcp,Connectivity,UserData',
|
||||
"traffictype": 'GUEST',
|
||||
"specifyVlan": "False",
|
||||
"specifyIpRanges": "True",
|
||||
"availability": 'Optional',
|
||||
"serviceProviderList": {
|
||||
"Dhcp": "NuageVsp",
|
||||
"Connectivity": "NuageVsp",
|
||||
"UserData": 'ConfigDrive'
|
||||
},
|
||||
"serviceCapabilityList": {
|
||||
"Connectivity": {
|
||||
"PublicAccess": "true"
|
||||
}
|
||||
}
|
||||
},
|
||||
"network_all2" : {
|
||||
"name": "SharedNetwork2-All-nuage",
|
||||
"displaytext": "SharedNetwork2-All-nuage",
|
||||
"gateway": "10.200.200.1",
|
||||
"netmask": "255.255.255.0",
|
||||
"startip": "10.200.200.21",
|
||||
"endip": "10.200.200.100",
|
||||
"acltype": "Domain"
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
class TestNuageConfigDrive(nuageTestCase):
|
||||
"""Test user data and password reset functionality
|
||||
using configDrive with Nuage VSP SDN plugin
|
||||
|
|
@ -308,6 +161,7 @@ class TestNuageConfigDrive(nuageTestCase):
|
|||
def run(self):
|
||||
self.expected_user_data = "hello world vm %s" % self.vm.name
|
||||
user_data = base64.b64encode(self.expected_user_data)
|
||||
self.end = None
|
||||
self.start = datetime.now()
|
||||
self.vm.update(self.nuagetestcase.api_client, userdata=user_data)
|
||||
self.end = datetime.now()
|
||||
|
|
@ -321,6 +175,8 @@ class TestNuageConfigDrive(nuageTestCase):
|
|||
return self.vm
|
||||
|
||||
def get_timestamps(self):
|
||||
if not self.end:
|
||||
self.end = datetime.now()
|
||||
return [self.start, self.end]
|
||||
|
||||
def get_userdata(self):
|
||||
|
|
@ -356,6 +212,8 @@ class TestNuageConfigDrive(nuageTestCase):
|
|||
return self.vm
|
||||
|
||||
def get_timestamps(self):
|
||||
if not self.end:
|
||||
self.end = datetime.now()
|
||||
return [self.start, self.end]
|
||||
|
||||
def get_password(self):
|
||||
|
|
@ -368,7 +226,6 @@ class TestNuageConfigDrive(nuageTestCase):
|
|||
@classmethod
|
||||
def setUpClass(cls):
|
||||
super(TestNuageConfigDrive, cls).setUpClass()
|
||||
cls.test_data["nuagevsp"].update(Services().services)
|
||||
return
|
||||
|
||||
def setUp(self):
|
||||
|
|
@ -480,7 +337,7 @@ class TestNuageConfigDrive(nuageTestCase):
|
|||
'Userdata found: %s is not equal to expected: %s'
|
||||
% (vmuserdata, userdata))
|
||||
|
||||
def verifyPassword(self, vm, ssh, iso_path, password):
|
||||
def verifyPassword(self, ssh, iso_path, password):
|
||||
self.debug("Expected VM password is %s " % password.password)
|
||||
password_file = iso_path+"/cloudstack/password/vm_password.txt"
|
||||
cmd = "cat %s" % password_file
|
||||
|
|
@ -520,11 +377,7 @@ class TestNuageConfigDrive(nuageTestCase):
|
|||
res = ssh.execute(cmd)
|
||||
metadata[file] = res
|
||||
|
||||
metadata_files = ["availability-zone.txt",
|
||||
"instance-id.txt",
|
||||
"service-offering.txt",
|
||||
"vm-id.txt"]
|
||||
for mfile in metadata_files:
|
||||
for mfile in vm_files:
|
||||
if mfile not in metadata:
|
||||
self.fail("{} file is not found in vm metadata".format(mfile))
|
||||
self.assertEqual(
|
||||
|
|
@ -546,7 +399,7 @@ class TestNuageConfigDrive(nuageTestCase):
|
|||
)
|
||||
return
|
||||
|
||||
def verifyOpenStackData(self, vm, ssh, iso_path):
|
||||
def verifyOpenStackData(self, ssh, iso_path):
|
||||
|
||||
openstackdata_dir = iso_path+"/openstack/latest/"
|
||||
openstackdata = {}
|
||||
|
|
@ -658,6 +511,10 @@ class TestNuageConfigDrive(nuageTestCase):
|
|||
metadata=False,
|
||||
sshkey=None,
|
||||
ssh_client=None):
|
||||
if self.isSimulator:
|
||||
self.debug("Simulator Environment: Skipping Config Drive content verification")
|
||||
return
|
||||
|
||||
self.debug("SSHing into the VM %s" % vm.name)
|
||||
if ssh_client is None:
|
||||
ssh = self.ssh_into_VM(vm, public_ip)
|
||||
|
|
@ -672,7 +529,7 @@ class TestNuageConfigDrive(nuageTestCase):
|
|||
self.debug("Verifying metadata for vm: %s" % vm.name)
|
||||
self.verifyMetaData(vm, ssh, config_drive_path)
|
||||
self.debug("Verifying openstackdata for vm: %s" % vm.name)
|
||||
self.verifyOpenStackData(vm, ssh, config_drive_path)
|
||||
self.verifyOpenStackData(ssh, config_drive_path)
|
||||
|
||||
if userdata is not None:
|
||||
self.debug("Verifying userdata for vm: %s" % vm.name)
|
||||
|
|
@ -680,7 +537,7 @@ class TestNuageConfigDrive(nuageTestCase):
|
|||
self.verifyOpenStackUserData(ssh, config_drive_path, userdata)
|
||||
if password_test.test_presence:
|
||||
self.debug("Verifying password for vm: %s" % vm.name)
|
||||
test_result = self.verifyPassword(vm, ssh, config_drive_path,
|
||||
test_result = self.verifyPassword(ssh, config_drive_path,
|
||||
password_test)
|
||||
self.assertEqual(test_result[0], password_test.presence,
|
||||
"Expected is that password is present: %s "
|
||||
|
|
@ -769,7 +626,7 @@ class TestNuageConfigDrive(nuageTestCase):
|
|||
cmd.keypair = keypair
|
||||
cmd.account = account
|
||||
cmd.domainid = domainid
|
||||
return(self.api_client.resetSSHKeyForVirtualMachine(cmd))
|
||||
return self.api_client.resetSSHKeyForVirtualMachine(cmd)
|
||||
|
||||
def update_sshkeypair(self, vm):
|
||||
vm.stop(self.api_client)
|
||||
|
|
|
|||
|
|
@ -98,12 +98,9 @@ class TestNuageExtraDhcp(nuageTestCase):
|
|||
cls.expected_dhcp_options_on_vm = {}
|
||||
cls.dhcp_options_map_keys = [1, 16, 28, 41, 64, 93]
|
||||
|
||||
cls._cleanup = [
|
||||
cls.shared_network_all,
|
||||
cls.shared_network_offering,
|
||||
cls.account
|
||||
]
|
||||
return
|
||||
cls._cleanup.append(cls.account)
|
||||
cls._cleanup.append(cls.shared_network_offering)
|
||||
cls._cleanup.append(cls.shared_network_all)
|
||||
|
||||
def setUp(self):
|
||||
self.vmdata["displayname"] = "vm"
|
||||
|
|
@ -273,16 +270,6 @@ class TestNuageExtraDhcp(nuageTestCase):
|
|||
# Cleanup resources used
|
||||
self.debug("Cleaning up the resources")
|
||||
self.update_NuageVspGlobalDomainTemplateName(name="")
|
||||
for obj in reversed(self.cleanup):
|
||||
try:
|
||||
if isinstance(obj, VirtualMachine):
|
||||
obj.delete(self.api_client, expunge=True)
|
||||
else:
|
||||
obj.delete(self.api_client)
|
||||
except Exception as e:
|
||||
self.error("Failed to cleanup %s, got %s" % (obj, e))
|
||||
# cleanup_resources(self.api_client, self.cleanup)
|
||||
self.cleanup = []
|
||||
self.debug("Cleanup complete!")
|
||||
return
|
||||
|
||||
|
|
@ -435,6 +422,10 @@ class TestNuageExtraDhcp(nuageTestCase):
|
|||
|
||||
def verify_dhcp_on_vm(
|
||||
self, dhcpleasefile, dhcp_option_map, ssh_client, cleanlease=True):
|
||||
if self.isSimulator:
|
||||
self.debug("Simulator Environment: Skipping VM DHCP option verification")
|
||||
return
|
||||
|
||||
cmd = 'cat /var/lib/dhclient/'+dhcpleasefile
|
||||
self.debug("get content of dhcp lease file " + cmd)
|
||||
outputlist = ssh_client.execute(cmd)
|
||||
|
|
@ -1071,9 +1062,6 @@ class TestNuageExtraDhcp(nuageTestCase):
|
|||
|
||||
self.delete_VM(vm4)
|
||||
self.delete_VM(vm3)
|
||||
self.delete_Network(network)
|
||||
if vpc:
|
||||
vpc.delete(self.api_client)
|
||||
|
||||
def validate_all_extra_dhcp_for_vm_actions_in_network(
|
||||
self, network,
|
||||
|
|
|
|||
|
|
@ -25,6 +25,12 @@ from marvin.lib.base import Account, Network
|
|||
from nose.plugins.attrib import attr
|
||||
import time
|
||||
|
||||
UPDATED_DOMAIN_NAME = "update.com"
|
||||
|
||||
ISOLATED_DOMAIN_NAME = "isolated.com"
|
||||
|
||||
VPC_DOMAIN_NAME = "vpc.com"
|
||||
|
||||
|
||||
class TestNuageInternalDns(nuageTestCase):
|
||||
DNS = "06"
|
||||
|
|
@ -98,6 +104,33 @@ class TestNuageInternalDns(nuageTestCase):
|
|||
self.debug("Successfully verified the creation and value of DHCP "
|
||||
"option type - %s in VSD" % dhcp_type)
|
||||
|
||||
def vm_verify_ping(self, src_vm, public_ip, dst_vm, domain_name):
|
||||
if self.isSimulator:
|
||||
self.debug("Simulator Environment: not verifying ping.")
|
||||
return
|
||||
|
||||
src_vm.ssh_ip = public_ip
|
||||
src_vm.ssh_port = self.test_data["virtual_machine"]["ssh_port"]
|
||||
src_vm.username = self.test_data["virtual_machine"]["username"]
|
||||
src_vm.password = self.test_data["virtual_machine"]["password"]
|
||||
self.debug("SSHing into VM: %s with %s" %
|
||||
(src_vm.ssh_ip, src_vm.password))
|
||||
|
||||
ssh = self.ssh_into_VM(src_vm, public_ip)
|
||||
|
||||
cmd = 'ping -c 2 ' + dst_vm.name
|
||||
self.debug("ping vm2 by hostname with command: " + cmd)
|
||||
outputlist = ssh.execute(cmd)
|
||||
self.debug("command is executed properly " + cmd)
|
||||
completeoutput = str(outputlist).strip('[]')
|
||||
self.debug("complete output is " + completeoutput)
|
||||
expectedlist = ['2 received', dst_vm.name + '.' + domain_name, dst_vm.ipaddress]
|
||||
for item in expectedlist:
|
||||
if item in completeoutput:
|
||||
self.debug("excepted value found in vm: " + item)
|
||||
else:
|
||||
self.fail("excepted value not found in vm: " + item)
|
||||
|
||||
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
|
||||
def test_01_Isolated_Network_with_zone(self):
|
||||
""" Verify InternalDns on Isolated Network
|
||||
|
|
@ -113,7 +146,7 @@ class TestNuageInternalDns(nuageTestCase):
|
|||
# update Network Domain at zone level
|
||||
cmd = updateZone.updateZoneCmd()
|
||||
cmd.id = self.zone.id
|
||||
cmd.domain = "isolated.com"
|
||||
cmd.domain = ISOLATED_DOMAIN_NAME
|
||||
self.apiclient.updateZone(cmd)
|
||||
self.debug("Creating and enabling Nuage Vsp Isolated Network "
|
||||
"offering...")
|
||||
|
|
@ -130,11 +163,11 @@ class TestNuageInternalDns(nuageTestCase):
|
|||
|
||||
# Internal DNS check point on VSD
|
||||
self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", network_1)
|
||||
self.verify_vsd_dhcp_option(self.DOMAINNAME, "isolated.com", network_1)
|
||||
self.verify_vsd_dhcp_option(self.DOMAINNAME, ISOLATED_DOMAIN_NAME, network_1)
|
||||
for nic in vm_1.nic:
|
||||
self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", nic, True)
|
||||
self.verify_vsd_dhcp_option(
|
||||
self.DOMAINNAME, "isolated.com", nic, True)
|
||||
self.DOMAINNAME, ISOLATED_DOMAIN_NAME, nic, True)
|
||||
self.verify_vsd_dhcp_option(self.HOSTNAME, "vm1", nic, True)
|
||||
|
||||
@attr(tags=["advanced", "nuagevsp"], required_hardware="true")
|
||||
|
|
@ -153,7 +186,7 @@ class TestNuageInternalDns(nuageTestCase):
|
|||
|
||||
cmd = updateZone.updateZoneCmd()
|
||||
cmd.id = self.zone.id
|
||||
cmd.domain = "isolated.com"
|
||||
cmd.domain = ISOLATED_DOMAIN_NAME
|
||||
self.apiclient.updateZone(cmd)
|
||||
|
||||
self.debug("Creating and enabling Nuage Vsp Isolated Network "
|
||||
|
|
@ -171,11 +204,11 @@ class TestNuageInternalDns(nuageTestCase):
|
|||
|
||||
# Internal DNS check point on VSD
|
||||
self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", network_1)
|
||||
self.verify_vsd_dhcp_option(self.DOMAINNAME, "isolated.com", network_1)
|
||||
self.verify_vsd_dhcp_option(self.DOMAINNAME, ISOLATED_DOMAIN_NAME, network_1)
|
||||
for nic in vm_1.nic:
|
||||
self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", nic, True)
|
||||
self.verify_vsd_dhcp_option(
|
||||
self.DOMAINNAME, "isolated.com", nic, True)
|
||||
self.DOMAINNAME, ISOLATED_DOMAIN_NAME, nic, True)
|
||||
self.verify_vsd_dhcp_option(self.HOSTNAME, "vm1", nic, True)
|
||||
|
||||
self.test_data["virtual_machine"]["displayname"] = "vm2"
|
||||
|
|
@ -187,39 +220,13 @@ class TestNuageInternalDns(nuageTestCase):
|
|||
for nic in vm_2.nic:
|
||||
self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", nic, True)
|
||||
self.verify_vsd_dhcp_option(
|
||||
self.DOMAINNAME, "isolated.com", nic, True)
|
||||
self.DOMAINNAME, ISOLATED_DOMAIN_NAME, nic, True)
|
||||
self.verify_vsd_dhcp_option(self.HOSTNAME, "vm2", nic, True)
|
||||
|
||||
public_ip_1 = self.acquire_PublicIPAddress(network_1)
|
||||
self.create_and_verify_fw(vm_1, public_ip_1, network_1)
|
||||
|
||||
vm_public_ip = public_ip_1.ipaddress.ipaddress
|
||||
|
||||
try:
|
||||
vm_1.ssh_ip = vm_public_ip
|
||||
vm_1.ssh_port = self.test_data["virtual_machine"]["ssh_port"]
|
||||
vm_1.username = self.test_data["virtual_machine"]["username"]
|
||||
vm_1.password = self.test_data["virtual_machine"]["password"]
|
||||
self.debug("SSHing into VM: %s with %s" %
|
||||
(vm_1.ssh_ip, vm_1.password))
|
||||
|
||||
ssh = vm_1.get_ssh_client(ipaddress=vm_public_ip)
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH into VM failed with exception %s" % e)
|
||||
|
||||
cmd = 'ping -c 2 vm2'
|
||||
self.debug("ping vm2 by hostname with command: " + cmd)
|
||||
outputlist = ssh.execute(cmd)
|
||||
self.debug("command is executed properly " + cmd)
|
||||
completeoutput = str(outputlist).strip('[]')
|
||||
self.debug("complete output is " + completeoutput)
|
||||
expectedlist = ['2 received', 'vm2.isolated.com', vm_2.ipaddress]
|
||||
for item in expectedlist:
|
||||
if item in completeoutput:
|
||||
self.debug("excepted value found in vm: " + item)
|
||||
else:
|
||||
self.fail("excepted value not found in vm: " + item)
|
||||
self.vm_verify_ping(vm_1, public_ip_1, vm_2, ISOLATED_DOMAIN_NAME)
|
||||
|
||||
@attr(tags=["advanced", "nuagevsp"], required_hardware="true")
|
||||
def test_03_Isolated_Network_restarts(self):
|
||||
|
|
@ -239,7 +246,7 @@ class TestNuageInternalDns(nuageTestCase):
|
|||
|
||||
cmd = updateZone.updateZoneCmd()
|
||||
cmd.id = self.zone.id
|
||||
cmd.domain = "isolated.com"
|
||||
cmd.domain = ISOLATED_DOMAIN_NAME
|
||||
self.apiclient.updateZone(cmd)
|
||||
|
||||
self.debug("Creating and enabling Nuage Vsp Isolated Network "
|
||||
|
|
@ -257,11 +264,11 @@ class TestNuageInternalDns(nuageTestCase):
|
|||
|
||||
# Internal DNS check point on VSD
|
||||
self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", network_1)
|
||||
self.verify_vsd_dhcp_option(self.DOMAINNAME, "isolated.com", network_1)
|
||||
self.verify_vsd_dhcp_option(self.DOMAINNAME, ISOLATED_DOMAIN_NAME, network_1)
|
||||
for nic in vm_1.nic:
|
||||
self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", nic, True)
|
||||
self.verify_vsd_dhcp_option(
|
||||
self.DOMAINNAME, "isolated.com", nic, True)
|
||||
self.DOMAINNAME, ISOLATED_DOMAIN_NAME, nic, True)
|
||||
self.verify_vsd_dhcp_option(self.HOSTNAME, "vm1", nic, True)
|
||||
|
||||
self.test_data["virtual_machine"]["displayname"] = "vm2"
|
||||
|
|
@ -273,40 +280,13 @@ class TestNuageInternalDns(nuageTestCase):
|
|||
for nic in vm_2.nic:
|
||||
self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", nic, True)
|
||||
self.verify_vsd_dhcp_option(
|
||||
self.DOMAINNAME, "isolated.com", nic, True)
|
||||
self.DOMAINNAME, ISOLATED_DOMAIN_NAME, nic, True)
|
||||
self.verify_vsd_dhcp_option(self.HOSTNAME, "vm2", nic, True)
|
||||
|
||||
public_ip_1 = self.acquire_PublicIPAddress(network_1)
|
||||
self.create_and_verify_fw(vm_1, public_ip_1, network_1)
|
||||
|
||||
vm_public_ip = public_ip_1.ipaddress.ipaddress
|
||||
|
||||
try:
|
||||
vm_1.ssh_ip = vm_public_ip
|
||||
vm_1.ssh_port = self.test_data["virtual_machine"]["ssh_port"]
|
||||
vm_1.username = self.test_data["virtual_machine"]["username"]
|
||||
vm_1.password = self.test_data["virtual_machine"]["password"]
|
||||
self.debug("SSHing into VM: %s with %s" %
|
||||
(vm_1.ssh_ip, vm_1.password))
|
||||
|
||||
ssh = vm_1.get_ssh_client(ipaddress=vm_public_ip)
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH into VM failed with exception %s" % e)
|
||||
|
||||
time.sleep(30)
|
||||
cmd = 'ping -c 2 vm2'
|
||||
self.debug("ping vm2 by hostname with command: " + cmd)
|
||||
outputlist = ssh.execute(cmd)
|
||||
self.debug("command is executed properly " + cmd)
|
||||
completeoutput = str(outputlist).strip('[]')
|
||||
self.debug("complete output is " + completeoutput)
|
||||
expectedlist = ['2 received', 'vm2.isolated.com', vm_2.ipaddress]
|
||||
for item in expectedlist:
|
||||
if item in completeoutput:
|
||||
self.debug("excepted value found in vm: " + item)
|
||||
else:
|
||||
self.fail("excepted value not found in vm: " + item)
|
||||
self.vm_verify_ping(vm_1, public_ip_1, vm_2, ISOLATED_DOMAIN_NAME)
|
||||
|
||||
# Restarting Isolated network (cleanup = false)
|
||||
self.debug("Restarting the created Isolated network without "
|
||||
|
|
@ -324,31 +304,7 @@ class TestNuageInternalDns(nuageTestCase):
|
|||
self.verify_vsd_vm(vm_1)
|
||||
self.verify_vsd_vm(vm_2)
|
||||
|
||||
try:
|
||||
vm_1.ssh_ip = vm_public_ip
|
||||
vm_1.ssh_port = self.test_data["virtual_machine"]["ssh_port"]
|
||||
vm_1.username = self.test_data["virtual_machine"]["username"]
|
||||
vm_1.password = self.test_data["virtual_machine"]["password"]
|
||||
self.debug("SSHing into VM: %s with %s" %
|
||||
(vm_1.ssh_ip, vm_1.password))
|
||||
|
||||
ssh = vm_1.get_ssh_client(ipaddress=vm_public_ip)
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH into VM failed with exception %s" % e)
|
||||
|
||||
cmd = 'ping -c 2 vm2'
|
||||
self.debug("ping vm2 by hostname with command: " + cmd)
|
||||
outputlist = ssh.execute(cmd)
|
||||
self.debug("command is executed properly " + cmd)
|
||||
completeoutput = str(outputlist).strip('[]')
|
||||
self.debug("complete output is " + completeoutput)
|
||||
expectedlist = ['2 received', 'vm2.isolated.com', vm_2.ipaddress]
|
||||
for item in expectedlist:
|
||||
if item in completeoutput:
|
||||
self.debug("excepted value found in vm: " + item)
|
||||
else:
|
||||
self.fail("excepted value not found in vm: " + item)
|
||||
self.vm_verify_ping(vm_1, public_ip_1, vm_2, ISOLATED_DOMAIN_NAME)
|
||||
|
||||
# Restarting Isolated network (cleanup = true)
|
||||
self.debug("Restarting the created Isolated network with cleanup...")
|
||||
|
|
@ -365,31 +321,7 @@ class TestNuageInternalDns(nuageTestCase):
|
|||
self.verify_vsd_vm(vm_1)
|
||||
self.verify_vsd_vm(vm_2)
|
||||
|
||||
try:
|
||||
vm_1.ssh_ip = vm_public_ip
|
||||
vm_1.ssh_port = self.test_data["virtual_machine"]["ssh_port"]
|
||||
vm_1.username = self.test_data["virtual_machine"]["username"]
|
||||
vm_1.password = self.test_data["virtual_machine"]["password"]
|
||||
self.debug("SSHing into VM: %s with %s" %
|
||||
(vm_1.ssh_ip, vm_1.password))
|
||||
|
||||
ssh = vm_1.get_ssh_client(ipaddress=vm_public_ip)
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH into VM failed with exception %s" % e)
|
||||
|
||||
cmd = 'ping -c 2 vm2'
|
||||
self.debug("ping vm2 by hostname with command: " + cmd)
|
||||
outputlist = ssh.execute(cmd)
|
||||
self.debug("command is executed properly " + cmd)
|
||||
completeoutput = str(outputlist).strip('[]')
|
||||
self.debug("complete output is " + completeoutput)
|
||||
expectedlist = ['2 received', 'vm2.isolated.com', vm_2.ipaddress]
|
||||
for item in expectedlist:
|
||||
if item in completeoutput:
|
||||
self.debug("excepted value found in vm: " + item)
|
||||
else:
|
||||
self.fail("excepted value not found in vm: " + item)
|
||||
self.vm_verify_ping(vm_1, public_ip_1, vm_2, ISOLATED_DOMAIN_NAME)
|
||||
|
||||
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
|
||||
def test_04_Update_Network_with_Domain(self):
|
||||
|
|
@ -407,7 +339,7 @@ class TestNuageInternalDns(nuageTestCase):
|
|||
# update Network Domain at zone level
|
||||
cmd = updateZone.updateZoneCmd()
|
||||
cmd.id = self.zone.id
|
||||
cmd.domain = "isolated.com"
|
||||
cmd.domain = ISOLATED_DOMAIN_NAME
|
||||
self.apiclient.updateZone(cmd)
|
||||
|
||||
self.debug("Creating and enabling Nuage Vsp Isolated Network "
|
||||
|
|
@ -429,23 +361,23 @@ class TestNuageInternalDns(nuageTestCase):
|
|||
for nic in vm_1.nic:
|
||||
self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", nic, True)
|
||||
self.verify_vsd_dhcp_option(
|
||||
self.DOMAINNAME, "isolated.com", nic, True)
|
||||
self.DOMAINNAME, ISOLATED_DOMAIN_NAME, nic, True)
|
||||
self.verify_vsd_dhcp_option(self.HOSTNAME, "vm1", nic, True)
|
||||
|
||||
update_response = Network.update(
|
||||
network_1, self.apiclient, id=network_1.id,
|
||||
networkdomain="update.com", changecidr=False)
|
||||
networkdomain=UPDATED_DOMAIN_NAME, changecidr=False)
|
||||
completeoutput = str(update_response).strip('[]')
|
||||
self.debug("network update response is " + completeoutput)
|
||||
self.assertEqual("update.com", update_response.networkdomain,
|
||||
self.assertEqual(UPDATED_DOMAIN_NAME, update_response.networkdomain,
|
||||
"Network Domain is not updated as expected"
|
||||
)
|
||||
self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", network_1)
|
||||
self.verify_vsd_dhcp_option(self.DOMAINNAME, "update.com", network_1)
|
||||
self.verify_vsd_dhcp_option(self.DOMAINNAME, UPDATED_DOMAIN_NAME, network_1)
|
||||
for nic in vm_1.nic:
|
||||
self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", nic, True)
|
||||
self.verify_vsd_dhcp_option(
|
||||
self.DOMAINNAME, "update.com", nic, True)
|
||||
self.DOMAINNAME, UPDATED_DOMAIN_NAME, nic, True)
|
||||
self.verify_vsd_dhcp_option(self.HOSTNAME, "vm1", nic, True)
|
||||
|
||||
@attr(tags=["advanced", "nuagevsp"], required_hardware="true")
|
||||
|
|
@ -465,7 +397,7 @@ class TestNuageInternalDns(nuageTestCase):
|
|||
# update Network Domain at zone level
|
||||
cmd = updateZone.updateZoneCmd()
|
||||
cmd.id = self.zone.id
|
||||
cmd.domain = "isolated.com"
|
||||
cmd.domain = ISOLATED_DOMAIN_NAME
|
||||
self.apiclient.updateZone(cmd)
|
||||
|
||||
self.debug("Creating and enabling Nuage Vsp Isolated Network "
|
||||
|
|
@ -484,27 +416,27 @@ class TestNuageInternalDns(nuageTestCase):
|
|||
# Internal DNS check point on VSD
|
||||
self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", network_1)
|
||||
self.verify_vsd_dhcp_option(
|
||||
self.DOMAINNAME, "isolated.com", network_1)
|
||||
self.DOMAINNAME, ISOLATED_DOMAIN_NAME, network_1)
|
||||
for nic in vm_1.nic:
|
||||
self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", nic, True)
|
||||
self.verify_vsd_dhcp_option(
|
||||
self.DOMAINNAME, "isolated.com", nic, True)
|
||||
self.DOMAINNAME, ISOLATED_DOMAIN_NAME, nic, True)
|
||||
self.verify_vsd_dhcp_option(self.HOSTNAME, "vm1", nic, True)
|
||||
|
||||
update_response = Network.update(
|
||||
network_1, self.apiclient, id=network_1.id,
|
||||
networkdomain="update.com", changecidr=False)
|
||||
networkdomain=UPDATED_DOMAIN_NAME, changecidr=False)
|
||||
completeoutput = str(update_response).strip('[]')
|
||||
self.debug("network update response is " + completeoutput)
|
||||
self.assertEqual("update.com", update_response.networkdomain,
|
||||
self.assertEqual(UPDATED_DOMAIN_NAME, update_response.networkdomain,
|
||||
"Network Domain is not updated as expected"
|
||||
)
|
||||
self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", network_1)
|
||||
self.verify_vsd_dhcp_option(self.DOMAINNAME, "update.com", network_1)
|
||||
self.verify_vsd_dhcp_option(self.DOMAINNAME, UPDATED_DOMAIN_NAME, network_1)
|
||||
for nic in vm_1.nic:
|
||||
self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", nic, True)
|
||||
self.verify_vsd_dhcp_option(
|
||||
self.DOMAINNAME, "update.com", nic, True)
|
||||
self.DOMAINNAME, UPDATED_DOMAIN_NAME, nic, True)
|
||||
self.verify_vsd_dhcp_option(self.HOSTNAME, "vm1", nic, True)
|
||||
|
||||
# stop and start VM to get new DHCP option
|
||||
|
|
@ -522,7 +454,7 @@ class TestNuageInternalDns(nuageTestCase):
|
|||
for nic in vm_2.nic:
|
||||
self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", nic, True)
|
||||
self.verify_vsd_dhcp_option(
|
||||
self.DOMAINNAME, "update.com", nic, True)
|
||||
self.DOMAINNAME, UPDATED_DOMAIN_NAME, nic, True)
|
||||
self.verify_vsd_dhcp_option(self.HOSTNAME, "vm2", nic, True)
|
||||
|
||||
try:
|
||||
|
|
@ -533,33 +465,7 @@ class TestNuageInternalDns(nuageTestCase):
|
|||
public_ip_1 = self.acquire_PublicIPAddress(network_1)
|
||||
self.create_and_verify_fw(vm_1, public_ip_1, network_1)
|
||||
|
||||
vm_public_ip = public_ip_1.ipaddress.ipaddress
|
||||
|
||||
try:
|
||||
vm_1.ssh_ip = vm_public_ip
|
||||
vm_1.ssh_port = self.test_data["virtual_machine"]["ssh_port"]
|
||||
vm_1.username = self.test_data["virtual_machine"]["username"]
|
||||
vm_1.password = self.test_data["virtual_machine"]["password"]
|
||||
self.debug("SSHing into VM: %s with %s" %
|
||||
(vm_1.ssh_ip, vm_1.password))
|
||||
|
||||
ssh = vm_1.get_ssh_client(ipaddress=vm_public_ip)
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH into VM failed with exception: %s " % e)
|
||||
|
||||
cmd = 'ping -c 2 vm2'
|
||||
self.debug("ping vm2 by hostname with command: " + cmd)
|
||||
outputlist = ssh.execute(cmd)
|
||||
self.debug("command is executed properly " + cmd)
|
||||
completeoutput = str(outputlist).strip('[]')
|
||||
self.debug("complete output is " + completeoutput)
|
||||
expectedlist = ['2 received', 'vm2.update.com', vm_2.ipaddress]
|
||||
for item in expectedlist:
|
||||
if item in completeoutput:
|
||||
self.debug("excepted value found in vm: " + item)
|
||||
else:
|
||||
self.fail("excepted value not found in vm: " + item)
|
||||
self.vm_verify_ping(vm_1, public_ip_1, vm_2, UPDATED_DOMAIN_NAME)
|
||||
|
||||
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
|
||||
def test_06_VPC_Network_With_InternalDns(self):
|
||||
|
|
@ -571,10 +477,9 @@ class TestNuageInternalDns(nuageTestCase):
|
|||
# 2. Deploy vm1 in tier network.
|
||||
# 3. Verify dhcp option 06 and 0f for subnet
|
||||
# 4. Verify dhcp option 06,15 and 0f for vm Interface.
|
||||
|
||||
cmd = updateZone.updateZoneCmd()
|
||||
cmd.id = self.zone.id
|
||||
cmd.domain = "vpc.com"
|
||||
cmd.domain = VPC_DOMAIN_NAME
|
||||
self.apiclient.updateZone(cmd)
|
||||
vpc_off = self.create_VpcOffering(self.dnsdata["vpc_offering"])
|
||||
self.validate_VpcOffering(vpc_off, state="Enabled")
|
||||
|
|
@ -596,10 +501,10 @@ class TestNuageInternalDns(nuageTestCase):
|
|||
|
||||
# Internal DNS check point on VSD
|
||||
self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", network_1)
|
||||
self.verify_vsd_dhcp_option(self.DOMAINNAME, "vpc.com", network_1)
|
||||
self.verify_vsd_dhcp_option(self.DOMAINNAME, VPC_DOMAIN_NAME, network_1)
|
||||
for nic in vm_1.nic:
|
||||
self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", nic, True)
|
||||
self.verify_vsd_dhcp_option(self.DOMAINNAME, "vpc.com", nic, True)
|
||||
self.verify_vsd_dhcp_option(self.DOMAINNAME, VPC_DOMAIN_NAME, nic, True)
|
||||
self.verify_vsd_dhcp_option(self.HOSTNAME, "vm1", nic, True)
|
||||
|
||||
@attr(tags=["advanced", "nuagevsp"], required_hardware="true")
|
||||
|
|
@ -617,7 +522,7 @@ class TestNuageInternalDns(nuageTestCase):
|
|||
|
||||
cmd = updateZone.updateZoneCmd()
|
||||
cmd.id = self.zone.id
|
||||
cmd.domain = "vpc.com"
|
||||
cmd.domain = VPC_DOMAIN_NAME
|
||||
self.apiclient.updateZone(cmd)
|
||||
|
||||
vpc_off = self.create_VpcOffering(self.dnsdata["vpc_offering"])
|
||||
|
|
@ -641,7 +546,7 @@ class TestNuageInternalDns(nuageTestCase):
|
|||
self.verify_vsd_dhcp_option(self.DOMAINNAME, "vpc.com", network_1)
|
||||
for nic in vm_1.nic:
|
||||
self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", nic, True)
|
||||
self.verify_vsd_dhcp_option(self.DOMAINNAME, "vpc.com", nic, True)
|
||||
self.verify_vsd_dhcp_option(self.DOMAINNAME, VPC_DOMAIN_NAME, nic, True)
|
||||
self.verify_vsd_dhcp_option(self.HOSTNAME, "vm1", nic, True)
|
||||
|
||||
self.test_data["virtual_machine"]["displayname"] = "vm2"
|
||||
|
|
@ -652,7 +557,7 @@ class TestNuageInternalDns(nuageTestCase):
|
|||
self.verify_vsd_vm(vm_2)
|
||||
for nic in vm_2.nic:
|
||||
self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", nic, True)
|
||||
self.verify_vsd_dhcp_option(self.DOMAINNAME, "vpc.com", nic, True)
|
||||
self.verify_vsd_dhcp_option(self.DOMAINNAME, VPC_DOMAIN_NAME, nic, True)
|
||||
self.verify_vsd_dhcp_option(self.HOSTNAME, "vm2", nic, True)
|
||||
|
||||
public_ip_1 = self.acquire_PublicIPAddress(network_1, vpc)
|
||||
|
|
@ -665,33 +570,7 @@ class TestNuageInternalDns(nuageTestCase):
|
|||
|
||||
# VSD verification
|
||||
self.verify_vsd_firewall_rule(public_ssh_rule)
|
||||
vm_public_ip = public_ip_1.ipaddress.ipaddress
|
||||
|
||||
try:
|
||||
vm_1.ssh_ip = vm_public_ip
|
||||
vm_1.ssh_port = self.test_data["virtual_machine"]["ssh_port"]
|
||||
vm_1.username = self.test_data["virtual_machine"]["username"]
|
||||
vm_1.password = self.test_data["virtual_machine"]["password"]
|
||||
self.debug("SSHing into VM: %s with %s" %
|
||||
(vm_1.ssh_ip, vm_1.password))
|
||||
|
||||
ssh = vm_1.get_ssh_client(ipaddress=vm_public_ip)
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH into VM failed with exception %s" % e)
|
||||
|
||||
cmd = 'ping -c 2 vm2'
|
||||
self.debug("ping vm2 by hostname with command: " + cmd)
|
||||
outputlist = ssh.execute(cmd)
|
||||
self.debug("command is executed properly " + cmd)
|
||||
completeoutput = str(outputlist).strip('[]')
|
||||
self.debug("complete output is " + completeoutput)
|
||||
expectedlist = ['2 received', 'vm2.vpc.com', vm_2.ipaddress]
|
||||
for item in expectedlist:
|
||||
if item in completeoutput:
|
||||
self.debug("excepted value found in vm: " + item)
|
||||
else:
|
||||
self.fail("excepted value not found in vm: " + item)
|
||||
self.vm_verify_ping(vm_1, public_ip_1, vm_2, VPC_DOMAIN_NAME)
|
||||
|
||||
@attr(tags=["advanced", "nuagevsp"], required_hardware="true")
|
||||
def test_08_VPC_Network_Restarts_With_InternalDns(self):
|
||||
|
|
@ -710,7 +589,7 @@ class TestNuageInternalDns(nuageTestCase):
|
|||
|
||||
cmd = updateZone.updateZoneCmd()
|
||||
cmd.id = self.zone.id
|
||||
cmd.domain = "vpc.com"
|
||||
cmd.domain = VPC_DOMAIN_NAME
|
||||
self.apiclient.updateZone(cmd)
|
||||
|
||||
vpc_off = self.create_VpcOffering(self.dnsdata["vpc_offering"])
|
||||
|
|
@ -731,10 +610,10 @@ class TestNuageInternalDns(nuageTestCase):
|
|||
self.verify_vsd_vm(vm_1)
|
||||
# Internal DNS check point on VSD
|
||||
self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", network_1)
|
||||
self.verify_vsd_dhcp_option(self.DOMAINNAME, "vpc.com", network_1)
|
||||
self.verify_vsd_dhcp_option(self.DOMAINNAME, VPC_DOMAIN_NAME, network_1)
|
||||
for nic in vm_1.nic:
|
||||
self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", nic, True)
|
||||
self.verify_vsd_dhcp_option(self.DOMAINNAME, "vpc.com", nic, True)
|
||||
self.verify_vsd_dhcp_option(self.DOMAINNAME, VPC_DOMAIN_NAME, nic, True)
|
||||
self.verify_vsd_dhcp_option(self.HOSTNAME, "vm1", nic, True)
|
||||
|
||||
self.test_data["virtual_machine"]["displayname"] = "vm2"
|
||||
|
|
@ -745,7 +624,7 @@ class TestNuageInternalDns(nuageTestCase):
|
|||
self.verify_vsd_vm(vm_2)
|
||||
for nic in vm_2.nic:
|
||||
self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", nic, True)
|
||||
self.verify_vsd_dhcp_option(self.DOMAINNAME, "vpc.com", nic, True)
|
||||
self.verify_vsd_dhcp_option(self.DOMAINNAME, VPC_DOMAIN_NAME, nic, True)
|
||||
self.verify_vsd_dhcp_option(self.HOSTNAME, "vm2", nic, True)
|
||||
|
||||
public_ip_1 = self.acquire_PublicIPAddress(network_1, vpc)
|
||||
|
|
@ -758,33 +637,8 @@ class TestNuageInternalDns(nuageTestCase):
|
|||
|
||||
# VSD verification
|
||||
self.verify_vsd_firewall_rule(public_ssh_rule)
|
||||
vm_public_ip = public_ip_1.ipaddress.ipaddress
|
||||
|
||||
try:
|
||||
vm_1.ssh_ip = vm_public_ip
|
||||
vm_1.ssh_port = self.test_data["virtual_machine"]["ssh_port"]
|
||||
vm_1.username = self.test_data["virtual_machine"]["username"]
|
||||
vm_1.password = self.test_data["virtual_machine"]["password"]
|
||||
self.debug("SSHing into VM: %s with %s" %
|
||||
(vm_1.ssh_ip, vm_1.password))
|
||||
|
||||
ssh = vm_1.get_ssh_client(ipaddress=vm_public_ip)
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH into VM failed with exception %s" % e)
|
||||
|
||||
cmd = 'ping -c 2 vm2'
|
||||
self.debug("ping vm2 by hostname with command: " + cmd)
|
||||
outputlist = ssh.execute(cmd)
|
||||
self.debug("command is executed properly " + cmd)
|
||||
completeoutput = str(outputlist).strip('[]')
|
||||
self.debug("complete output is " + completeoutput)
|
||||
expectedlist = ['2 received', 'vm2.vpc.com', vm_2.ipaddress]
|
||||
for item in expectedlist:
|
||||
if item in completeoutput:
|
||||
self.debug("excepted value found in vm: " + item)
|
||||
else:
|
||||
self.fail("excepted value not found in vm: " + item)
|
||||
self.vm_verify_ping(vm_1, public_ip_1, vm_2, VPC_DOMAIN_NAME)
|
||||
|
||||
# Restarting VPC network (cleanup = false)
|
||||
self.debug("Restarting the created VPC network without cleanup...")
|
||||
|
|
@ -801,31 +655,7 @@ class TestNuageInternalDns(nuageTestCase):
|
|||
self.verify_vsd_vm(vm_1)
|
||||
self.verify_vsd_vm(vm_2)
|
||||
|
||||
try:
|
||||
vm_1.ssh_ip = vm_public_ip
|
||||
vm_1.ssh_port = self.test_data["virtual_machine"]["ssh_port"]
|
||||
vm_1.username = self.test_data["virtual_machine"]["username"]
|
||||
vm_1.password = self.test_data["virtual_machine"]["password"]
|
||||
self.debug("SSHing into VM: %s with %s" %
|
||||
(vm_1.ssh_ip, vm_1.password))
|
||||
|
||||
ssh = vm_1.get_ssh_client(ipaddress=vm_public_ip)
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH into VM failed with exception %s" % e)
|
||||
|
||||
cmd = 'ping -c 2 vm2'
|
||||
self.debug("ping vm2 by hostname with command: " + cmd)
|
||||
outputlist = ssh.execute(cmd)
|
||||
self.debug("command is executed properly " + cmd)
|
||||
completeoutput = str(outputlist).strip('[]')
|
||||
self.debug("complete output is " + completeoutput)
|
||||
expectedlist = ['2 received', 'vm2.vpc.com', vm_2.ipaddress]
|
||||
for item in expectedlist:
|
||||
if item in completeoutput:
|
||||
self.debug("excepted value found in vm: " + item)
|
||||
else:
|
||||
self.fail("excepted value not found in vm: " + item)
|
||||
self.vm_verify_ping(vm_1, public_ip_1, vm_2, VPC_DOMAIN_NAME)
|
||||
|
||||
# Restarting VPC network (cleanup = true)
|
||||
self.debug("Restarting the created VPC network with cleanup...")
|
||||
|
|
@ -842,31 +672,7 @@ class TestNuageInternalDns(nuageTestCase):
|
|||
self.verify_vsd_vm(vm_1)
|
||||
self.verify_vsd_vm(vm_2)
|
||||
|
||||
try:
|
||||
vm_1.ssh_ip = vm_public_ip
|
||||
vm_1.ssh_port = self.test_data["virtual_machine"]["ssh_port"]
|
||||
vm_1.username = self.test_data["virtual_machine"]["username"]
|
||||
vm_1.password = self.test_data["virtual_machine"]["password"]
|
||||
self.debug("SSHing into VM: %s with %s" %
|
||||
(vm_1.ssh_ip, vm_1.password))
|
||||
|
||||
ssh = vm_1.get_ssh_client(ipaddress=vm_public_ip)
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH into VM failed with exception %s" % e)
|
||||
|
||||
cmd = 'ping -c 2 vm2'
|
||||
self.debug("ping vm2 by hostname with command: " + cmd)
|
||||
outputlist = ssh.execute(cmd)
|
||||
self.debug("command is executed properly " + cmd)
|
||||
completeoutput = str(outputlist).strip('[]')
|
||||
self.debug("complete output is " + completeoutput)
|
||||
expectedlist = ['2 received', 'vm2.vpc.com', vm_2.ipaddress]
|
||||
for item in expectedlist:
|
||||
if item in completeoutput:
|
||||
self.debug("excepted value found in vm: " + item)
|
||||
else:
|
||||
self.fail("excepted value not found in vm: " + item)
|
||||
self.vm_verify_ping(vm_1, public_ip_1, vm_2, VPC_DOMAIN_NAME)
|
||||
|
||||
# Restarting VPC (cleanup = false)
|
||||
self.debug("Restarting the VPC without cleanup...")
|
||||
|
|
@ -882,31 +688,7 @@ class TestNuageInternalDns(nuageTestCase):
|
|||
self.verify_vsd_router(vr)
|
||||
self.verify_vsd_vm(vm_1)
|
||||
|
||||
try:
|
||||
vm_1.ssh_ip = vm_public_ip
|
||||
vm_1.ssh_port = self.test_data["virtual_machine"]["ssh_port"]
|
||||
vm_1.username = self.test_data["virtual_machine"]["username"]
|
||||
vm_1.password = self.test_data["virtual_machine"]["password"]
|
||||
self.debug("SSHing into VM: %s with %s" %
|
||||
(vm_1.ssh_ip, vm_1.password))
|
||||
|
||||
ssh = vm_1.get_ssh_client(ipaddress=vm_public_ip)
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH into VM failed with exception %s" % e)
|
||||
|
||||
cmd = 'ping -c 2 vm2'
|
||||
self.debug("ping vm2 by hostname with command: " + cmd)
|
||||
outputlist = ssh.execute(cmd)
|
||||
self.debug("command is executed properly " + cmd)
|
||||
completeoutput = str(outputlist).strip('[]')
|
||||
self.debug("complete output is " + completeoutput)
|
||||
expectedlist = ['2 received', 'vm2.vpc.com', vm_2.ipaddress]
|
||||
for item in expectedlist:
|
||||
if item in completeoutput:
|
||||
self.debug("excepted value found in vm: " + item)
|
||||
else:
|
||||
self.fail("excepted value not found in vm: " + item)
|
||||
self.vm_verify_ping(vm_1, public_ip_1, vm_2, VPC_DOMAIN_NAME)
|
||||
|
||||
# Restarting VPC (cleanup = true)
|
||||
self.debug("Restarting the VPC with cleanup...")
|
||||
|
|
@ -922,31 +704,7 @@ class TestNuageInternalDns(nuageTestCase):
|
|||
self.verify_vsd_router(vr)
|
||||
self.verify_vsd_vm(vm_1)
|
||||
|
||||
try:
|
||||
vm_1.ssh_ip = vm_public_ip
|
||||
vm_1.ssh_port = self.test_data["virtual_machine"]["ssh_port"]
|
||||
vm_1.username = self.test_data["virtual_machine"]["username"]
|
||||
vm_1.password = self.test_data["virtual_machine"]["password"]
|
||||
self.debug("SSHing into VM: %s with %s" %
|
||||
(vm_1.ssh_ip, vm_1.password))
|
||||
|
||||
ssh = vm_1.get_ssh_client(ipaddress=vm_public_ip)
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH into VM failed with exception %s" % e)
|
||||
|
||||
cmd = 'ping -c 2 vm2'
|
||||
self.debug("ping vm2 by hostname with command: " + cmd)
|
||||
outputlist = ssh.execute(cmd)
|
||||
self.debug("command is executed properly " + cmd)
|
||||
completeoutput = str(outputlist).strip('[]')
|
||||
self.debug("complete output is " + completeoutput)
|
||||
expectedlist = ['2 received', 'vm2.vpc.com', vm_2.ipaddress]
|
||||
for item in expectedlist:
|
||||
if item in completeoutput:
|
||||
self.debug("excepted value found in vm: " + item)
|
||||
else:
|
||||
self.fail("excepted value not found in vm: " + item)
|
||||
self.vm_verify_ping(vm_1, public_ip_1, vm_2, VPC_DOMAIN_NAME)
|
||||
|
||||
@attr(tags=["advanced", "nuagevsp"], required_hardware="true")
|
||||
def test_09_update_network_offering_isolated_network(self):
|
||||
|
|
|
|||
|
|
@ -31,29 +31,6 @@ import unittest
|
|||
import re
|
||||
|
||||
|
||||
class Services:
|
||||
"""Test network services
|
||||
"""
|
||||
def __init__(self):
|
||||
self.services = {
|
||||
"shared_network_offering": {
|
||||
"name": "MySharedOffering-shared",
|
||||
"displaytext": "MySharedOffering",
|
||||
"guestiptype": "Shared",
|
||||
"supportedservices": "Dhcp,Dns,UserData",
|
||||
"specifyVlan": "True",
|
||||
"specifyIpRanges": "True",
|
||||
"traffictype": "GUEST",
|
||||
"tags": "native",
|
||||
"serviceProviderList": {
|
||||
"Dhcp": "VirtualRouter",
|
||||
"Dns": "VirtualRouter",
|
||||
"UserData": "VirtualRouter"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
class TestNuageMigration(nuageTestCase):
|
||||
"""Test Native to Nuage Migration
|
||||
"""
|
||||
|
|
@ -61,7 +38,6 @@ class TestNuageMigration(nuageTestCase):
|
|||
@classmethod
|
||||
def setUpClass(cls):
|
||||
super(TestNuageMigration, cls).setUpClass()
|
||||
cls.services = Services().services
|
||||
|
||||
if not hasattr(cls.vsp_physical_network, "tags") \
|
||||
or cls.vsp_physical_network.tags != 'nuage':
|
||||
|
|
|
|||
|
|
@ -138,6 +138,10 @@ class TestNuagePasswordReset(nuageTestCase):
|
|||
# cloud-set-guest-password script from people.apache.org in the given VM
|
||||
# (SSH client)
|
||||
def install_cloud_set_guest_password_script(self, ssh_client):
|
||||
if self.isSimulator:
|
||||
self.debug( "Simulator Environment: Skipping installing"
|
||||
" cloud-set-guest-password script")
|
||||
return
|
||||
self.debug("Installing cloud-set-guest-password script")
|
||||
cmd = "cd /etc/init.d;wget http://people.apache.org/~tsp/" \
|
||||
"cloud-set-guest-password"
|
||||
|
|
@ -254,6 +258,10 @@ class TestNuagePasswordReset(nuageTestCase):
|
|||
self.create_and_verify_fw(self.vm_1, public_ip_1, self.network)
|
||||
ssh = self.ssh_into_VM(self.vm_1, public_ip_1)
|
||||
user_data_cmd = self.get_userdata_url(self.vm_1)
|
||||
if self.isSimulator:
|
||||
self.debug("Simulator Environment: ending test early "
|
||||
"because we don't have real vms")
|
||||
return
|
||||
self.debug("Getting user data with command: " + user_data_cmd)
|
||||
actual_user_data = base64.b64decode(self.execute_cmd
|
||||
(ssh, user_data_cmd))
|
||||
|
|
@ -261,7 +269,7 @@ class TestNuagePasswordReset(nuageTestCase):
|
|||
", Expected user data - " + expected_user_data)
|
||||
self.assertEqual(actual_user_data, expected_user_data,
|
||||
"Un-expected VM (VM_1) user data"
|
||||
)
|
||||
)
|
||||
|
||||
self.debug("Checking for cloud-set-guest-password script in the "
|
||||
"VM for testing password reset functionality...")
|
||||
|
|
@ -269,6 +277,7 @@ class TestNuagePasswordReset(nuageTestCase):
|
|||
ls_result = self.execute_cmd(ssh, ls_cmd)
|
||||
ls_result = ls_result.lower()
|
||||
self.debug("Response from ls_cmd: " + ls_result)
|
||||
|
||||
if "no such file" in ls_result:
|
||||
self.debug("No cloud-set-guest-password script in the VM")
|
||||
self.debug("Installing the cloud-set-guest-password script "
|
||||
|
|
|
|||
|
|
@ -409,7 +409,7 @@ class TestNuageSharedNetworkUserdata(nuageTestCase):
|
|||
except Exception as e:
|
||||
self.debug("Deploy vm fails as expected with exception %s" % e)
|
||||
self.debug("Going to verify the exception message")
|
||||
exceptionmsg = "it is reserved for the VR in network"
|
||||
exceptionmsg = "Unable to start a VM due to insufficient capacity"
|
||||
if exceptionmsg in str(e):
|
||||
self.debug("correct exception is raised")
|
||||
else:
|
||||
|
|
@ -819,8 +819,7 @@ class TestNuageSharedNetworkUserdata(nuageTestCase):
|
|||
"""
|
||||
self.updateTemplate(True)
|
||||
self.debug("Deploy VM to shared Network scope as all")
|
||||
self.test_data["virtual_machine"]["ipaddress"] = \
|
||||
self.nuagenetworkdata["network_all"]["endip"]
|
||||
self.test_data["virtual_machine"]["ipaddress"] = None
|
||||
vm_1 = self.create_VM(
|
||||
self.shared_network_all, account=self.account_d11a)
|
||||
|
||||
|
|
@ -847,8 +846,7 @@ class TestNuageSharedNetworkUserdata(nuageTestCase):
|
|||
"""
|
||||
self.updateTemplate(True)
|
||||
self.debug("Deploy VM to shared Network scope as all")
|
||||
self.test_data["virtual_machine"]["ipaddress"] = \
|
||||
self.nuagenetworkdata["network_all"]["endip"]
|
||||
self.test_data["virtual_machine"]["ipaddress"] = None
|
||||
vm_1 = self.create_VM(
|
||||
self.shared_network_domain_with_subdomain_d11,
|
||||
account=self.account_d11a)
|
||||
|
|
@ -876,8 +874,7 @@ class TestNuageSharedNetworkUserdata(nuageTestCase):
|
|||
"""
|
||||
self.updateTemplate(True)
|
||||
self.debug("Deploy VM to shared Network scope as all")
|
||||
self.test_data["virtual_machine"]["ipaddress"] = \
|
||||
self.nuagenetworkdata["network_all"]["endip"]
|
||||
self.test_data["virtual_machine"]["ipaddress"] = None
|
||||
vm_1 = self.create_VM(
|
||||
self.shared_network_account_d111a, account=self.account_d11a)
|
||||
|
||||
|
|
@ -901,8 +898,6 @@ class TestNuageSharedNetworkUserdata(nuageTestCase):
|
|||
"""
|
||||
|
||||
try:
|
||||
self.test_data["virtual_machine"]["ipaddress"] = \
|
||||
self.nuagenetworkdata["network_all"]["endip"]
|
||||
vm_1 = self.create_VM(
|
||||
self.shared_network_domain_with_subdomain_d11,
|
||||
account=self.account_d11a)
|
||||
|
|
|
|||
|
|
@ -140,12 +140,15 @@ class TestNuageSourceNat(nuageTestCase):
|
|||
self.verify_vsd_firewall_rule(public_ssh_rule)
|
||||
|
||||
# Checking for wget file
|
||||
ssh_client = self.ssh_into_VM(vm, public_ip)
|
||||
cmd = "ls /"
|
||||
file_list = self.execute_cmd(ssh_client, cmd)
|
||||
if "index.html" in str(file_list):
|
||||
cmd = "rm -rf /index.html*"
|
||||
self.execute_cmd(ssh_client, cmd)
|
||||
is_in_file_list = None
|
||||
if not self.isSimulator:
|
||||
ssh_client = self.ssh_into_VM(vm, public_ip)
|
||||
cmd = "ls /"
|
||||
file_list = self.execute_cmd(ssh_client, cmd)
|
||||
is_in_file_list = "index.html" in str(file_list)
|
||||
if is_in_file_list:
|
||||
cmd = "rm -rf /index.html*"
|
||||
self.execute_cmd(ssh_client, cmd)
|
||||
|
||||
# Removing Ingress Firewall/Network ACL rule
|
||||
self.debug("Removing the created Ingress Firewall/Network ACL "
|
||||
|
|
@ -194,11 +197,11 @@ class TestNuageSourceNat(nuageTestCase):
|
|||
"VSD")
|
||||
|
||||
# Final test result
|
||||
if "index.html" in str(file_list):
|
||||
if is_in_file_list:
|
||||
self.debug("Successfully verified Source NAT traffic "
|
||||
"(wget www.google.com) to the Internet from VM - %s"
|
||||
% vm.name)
|
||||
else:
|
||||
elif not self.isSimulator:
|
||||
self.fail("Failed to verify Source NAT traffic "
|
||||
"(wget www.google.com) to the Internet from VM - %s"
|
||||
% vm.name)
|
||||
|
|
|
|||
|
|
@ -112,6 +112,10 @@ class TestNuageStaticNat(nuageTestCase):
|
|||
# server running on the corresponding VM in the given network
|
||||
def verify_StaticNAT_traffic(self, network, public_ip, vpc=None,
|
||||
non_default_nic=False):
|
||||
if self.isSimulator:
|
||||
self.debug("Simulator Environment: skipping static nat"
|
||||
"traffic tests.")
|
||||
return
|
||||
# Adding Ingress Firewall/Network ACL rule
|
||||
self.debug("Adding Ingress Firewall/Network ACL rule to make the "
|
||||
"created Static NAT rule (wget) accessible...")
|
||||
|
|
@ -186,6 +190,11 @@ class TestNuageStaticNat(nuageTestCase):
|
|||
def verify_StaticNAT_Internet_traffic(self, vm, network, public_ip,
|
||||
vpc=None, non_default_nic=False,
|
||||
negative_test=False):
|
||||
if self.isSimulator and not negative_test:
|
||||
self.debug("Simulator Environment: not verifying internet traffic")
|
||||
return
|
||||
elif self.isSimulator:
|
||||
raise Exception("Simulator simulating exception")
|
||||
# Adding Ingress Firewall/Network ACL rule
|
||||
self.debug("Adding Ingress Firewall/Network ACL rule to make the "
|
||||
"created Static NAT rule (SSH) accessible...")
|
||||
|
|
@ -1677,15 +1686,17 @@ class TestNuageStaticNat(nuageTestCase):
|
|||
self.verify_vsd_floating_ip(network_2, vm, public_ip_2.ipaddress)
|
||||
|
||||
# Verifying Static NAT traffic
|
||||
with self.assertRaises(AssertionError):
|
||||
self.verify_StaticNAT_traffic(network_1, public_ip_1)
|
||||
if not self.isSimulator:
|
||||
with self.assertRaises(AssertionError):
|
||||
self.verify_StaticNAT_traffic(network_1, public_ip_1)
|
||||
self.debug("Static NAT rule not enabled in this VM NIC")
|
||||
self.verify_StaticNAT_traffic(network_2, public_ip_2)
|
||||
|
||||
# Verifying Static NAT traffic (wget www.google.com) to the Internet
|
||||
# from the deployed VM
|
||||
with self.assertRaises(Exception):
|
||||
self.verify_StaticNAT_Internet_traffic(vm, network_1, public_ip_1)
|
||||
if not self.isSimulator:
|
||||
with self.assertRaises(Exception):
|
||||
self.verify_StaticNAT_Internet_traffic(vm, network_1, public_ip_1)
|
||||
self.debug("Static NAT rule not enabled in this VM NIC")
|
||||
self.verify_StaticNAT_Internet_traffic(vm, network_2, public_ip_2)
|
||||
|
||||
|
|
|
|||
|
|
@ -171,6 +171,10 @@ class TestNuageInternalLb(nuageTestCase):
|
|||
# verify_vpc_vm_ingress_traffic - Verifies ingress traffic to the given VM
|
||||
# (SSH into VM) via a created Static NAT rule in the given VPC network
|
||||
def verify_vpc_vm_ingress_traffic(self, vm, network, vpc):
|
||||
if self.isSimulator:
|
||||
self.debug("Simulator Environment: "
|
||||
"skipping vpc vm ingress traffic tests.")
|
||||
return
|
||||
self.debug("Verifying ingress traffic to the VM (SSH into VM) - %s "
|
||||
"via a created Static NAT rule in the VPC network - %s" %
|
||||
(vm, network))
|
||||
|
|
@ -236,6 +240,9 @@ class TestNuageInternalLb(nuageTestCase):
|
|||
# wget_from_vm_cmd - From within the given VM (ssh client),
|
||||
# fetches index.html file of web server running with the given public IP
|
||||
def wget_from_vm_cmd(self, ssh_client, ip_address, port):
|
||||
if self.isSimulator:
|
||||
self.debug("Simulator Environment: not wgeting from vm cmd.")
|
||||
return
|
||||
wget_file = ""
|
||||
cmd = "rm -rf index.html*"
|
||||
self.execute_cmd(ssh_client, cmd)
|
||||
|
|
@ -260,6 +267,9 @@ class TestNuageInternalLb(nuageTestCase):
|
|||
# belongs to the given Internal LB rule
|
||||
# assigned VMs (vm array)
|
||||
def verify_lb_wget_file(self, wget_file, vm_array):
|
||||
if self.isSimulator:
|
||||
self.debug("Simulator Environment: not verifying file on vm.")
|
||||
return
|
||||
wget_server_ip = None
|
||||
for vm in vm_array:
|
||||
for nic in vm.nic:
|
||||
|
|
@ -1406,6 +1416,10 @@ class TestNuageInternalLb(nuageTestCase):
|
|||
# VSD verification
|
||||
self.verify_vsd_firewall_rule(public_ssh_rule)
|
||||
|
||||
if self.isSimulator:
|
||||
self.debug("Simulator Environment: skipping traffic tests.")
|
||||
return
|
||||
|
||||
# Internal LB (wget) traffic tests
|
||||
ssh_client = self.ssh_into_VM(public_vm, public_ip)
|
||||
wget_file_1 = self.wget_from_vm_cmd(
|
||||
|
|
@ -1663,6 +1677,10 @@ class TestNuageInternalLb(nuageTestCase):
|
|||
# VSD verification
|
||||
self.verify_vsd_firewall_rule(public_ssh_rule)
|
||||
|
||||
if self.isSimulator:
|
||||
self.debug("Simulator Environment: skipping traffic tests.")
|
||||
return
|
||||
|
||||
# Internal LB (wget) traffic tests with Round Robin Algorithm
|
||||
ssh_client = self.ssh_into_VM(public_vm, public_ip)
|
||||
self.validate_internallb_algorithm_traffic(
|
||||
|
|
@ -1863,14 +1881,8 @@ class TestNuageInternalLb(nuageTestCase):
|
|||
self.verify_vsd_firewall_rule(public_ssh_rule)
|
||||
|
||||
# Internal LB (wget) traffic test
|
||||
ssh_client = self.ssh_into_VM(public_vm, public_ip)
|
||||
wget_file = self.wget_from_vm_cmd(
|
||||
ssh_client, int_lb_rule_1.sourceipaddress,
|
||||
self.test_data["http_rule"]["publicport"])
|
||||
|
||||
# Verifying Internal LB (wget) traffic test
|
||||
self.verify_lb_wget_file(
|
||||
wget_file, [internal_vm, internal_vm_1, internal_vm_2])
|
||||
self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm, internal_vm_1,
|
||||
internal_vm_2, public_ip, public_vm)
|
||||
|
||||
# Restart Internal tier (cleanup = false)
|
||||
# InternalLbVm gets destroyed and deployed again in the Internal tier
|
||||
|
|
@ -1909,23 +1921,8 @@ class TestNuageInternalLb(nuageTestCase):
|
|||
self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
|
||||
|
||||
# Internal LB (wget) traffic test
|
||||
ssh_client = self.ssh_into_VM(public_vm, public_ip)
|
||||
tries = 0
|
||||
while tries < 120:
|
||||
wget_file = self.wget_from_vm_cmd(
|
||||
ssh_client, int_lb_rule_1.sourceipaddress,
|
||||
self.test_data["http_rule"]["publicport"])
|
||||
if wget_file != "":
|
||||
break
|
||||
self.debug("Waiting for the InternalLbVm in the Internal tier to "
|
||||
"be fully resolved for (wget) traffic test...")
|
||||
time.sleep(5)
|
||||
tries += 1
|
||||
|
||||
# Verifying Internal LB (wget) traffic test
|
||||
self.verify_lb_wget_file(
|
||||
wget_file, [internal_vm, internal_vm_1, internal_vm_2])
|
||||
|
||||
self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm, internal_vm_1,
|
||||
internal_vm_2, public_ip, public_vm)
|
||||
# Restart Internal tier (cleanup = true)
|
||||
# InternalLbVm gets destroyed and deployed again in the Internal tier
|
||||
self.debug("Restarting the Internal tier with cleanup...")
|
||||
|
|
@ -1963,22 +1960,8 @@ class TestNuageInternalLb(nuageTestCase):
|
|||
self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
|
||||
|
||||
# Internal LB (wget) traffic test
|
||||
ssh_client = self.ssh_into_VM(public_vm, public_ip)
|
||||
tries = 0
|
||||
while tries < 120:
|
||||
wget_file = self.wget_from_vm_cmd(
|
||||
ssh_client, int_lb_rule_1.sourceipaddress,
|
||||
self.test_data["http_rule"]["publicport"])
|
||||
if wget_file != "":
|
||||
break
|
||||
self.debug("Waiting for the InternalLbVm in the Internal tier to "
|
||||
"be fully resolved for (wget) traffic test...")
|
||||
time.sleep(5)
|
||||
tries += 1
|
||||
|
||||
# Verifying Internal LB (wget) traffic test
|
||||
self.verify_lb_wget_file(
|
||||
wget_file, [internal_vm, internal_vm_1, internal_vm_2])
|
||||
self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm, internal_vm_1,
|
||||
internal_vm_2, public_ip, public_vm)
|
||||
|
||||
# Restart Public tier (cleanup = false)
|
||||
# This restart has no effect on the InternalLbVm functionality
|
||||
|
|
@ -2057,14 +2040,8 @@ class TestNuageInternalLb(nuageTestCase):
|
|||
self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
|
||||
|
||||
# Internal LB (wget) traffic test
|
||||
ssh_client = self.ssh_into_VM(public_vm, public_ip)
|
||||
wget_file = self.wget_from_vm_cmd(
|
||||
ssh_client, int_lb_rule_1.sourceipaddress,
|
||||
self.test_data["http_rule"]["publicport"])
|
||||
|
||||
# Verifying Internal LB (wget) traffic test
|
||||
self.verify_lb_wget_file(
|
||||
wget_file, [internal_vm, internal_vm_1, internal_vm_2])
|
||||
self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm, internal_vm_1,
|
||||
internal_vm_2, public_ip, public_vm)
|
||||
|
||||
# Stopping VMs in the Internal tier
|
||||
# wget traffic test fails as all the VMs in the Internal tier are in
|
||||
|
|
@ -2095,17 +2072,8 @@ class TestNuageInternalLb(nuageTestCase):
|
|||
self.verify_vsd_lb_device(int_lb_vm)
|
||||
|
||||
# Internal LB (wget) traffic test
|
||||
ssh_client = self.ssh_into_VM(public_vm, public_ip)
|
||||
wget_file = self.wget_from_vm_cmd(
|
||||
ssh_client, int_lb_rule_1.sourceipaddress,
|
||||
self.test_data["http_rule"]["publicport"])
|
||||
|
||||
# Verifying Internal LB (wget) traffic test
|
||||
with self.assertRaises(Exception):
|
||||
self.verify_lb_wget_file(
|
||||
wget_file, [internal_vm, internal_vm_1, internal_vm_2])
|
||||
self.debug("Failed to wget file as all the VMs in the Internal tier "
|
||||
"are in stopped state")
|
||||
self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm, internal_vm_1,
|
||||
internal_vm_2, public_ip, public_vm, should_fail=True)
|
||||
|
||||
# Starting VMs in the Internal tier
|
||||
# wget traffic test succeeds as all the VMs in the Internal tier are
|
||||
|
|
@ -2144,22 +2112,8 @@ class TestNuageInternalLb(nuageTestCase):
|
|||
self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
|
||||
|
||||
# Internal LB (wget) traffic test
|
||||
ssh_client = self.ssh_into_VM(public_vm, public_ip)
|
||||
tries = 0
|
||||
while tries < 120:
|
||||
wget_file = self.wget_from_vm_cmd(
|
||||
ssh_client, int_lb_rule_1.sourceipaddress,
|
||||
self.test_data["http_rule"]["publicport"])
|
||||
if wget_file != "":
|
||||
break
|
||||
self.debug("Waiting for the InternalLbVm in the Internal tier to "
|
||||
"be fully resolved for (wget) traffic test...")
|
||||
time.sleep(5)
|
||||
tries += 1
|
||||
|
||||
# Verifying Internal LB (wget) traffic test
|
||||
self.verify_lb_wget_file(
|
||||
wget_file, [internal_vm, internal_vm_1, internal_vm_2])
|
||||
self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm, internal_vm_1,
|
||||
internal_vm_2, public_ip, public_vm)
|
||||
|
||||
# Restarting VPC (cleanup = false)
|
||||
# VPC VR gets destroyed and deployed again in the VPC
|
||||
|
|
@ -2206,14 +2160,8 @@ class TestNuageInternalLb(nuageTestCase):
|
|||
self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
|
||||
|
||||
# Internal LB (wget) traffic test
|
||||
ssh_client = self.ssh_into_VM(public_vm, public_ip)
|
||||
wget_file = self.wget_from_vm_cmd(
|
||||
ssh_client, int_lb_rule_1.sourceipaddress,
|
||||
self.test_data["http_rule"]["publicport"])
|
||||
|
||||
# Verifying Internal LB (wget) traffic test
|
||||
self.verify_lb_wget_file(
|
||||
wget_file, [internal_vm, internal_vm_1, internal_vm_2])
|
||||
self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm, internal_vm_1,
|
||||
internal_vm_2, public_ip, public_vm)
|
||||
|
||||
# Restarting VPC (cleanup = true)
|
||||
# VPC VR gets destroyed and deployed again in the VPC
|
||||
|
|
@ -2269,6 +2217,35 @@ class TestNuageInternalLb(nuageTestCase):
|
|||
self.verify_lb_wget_file(
|
||||
wget_file, [internal_vm, internal_vm_1, internal_vm_2])
|
||||
|
||||
def verify_internal_lb_wget_traffic(self, int_lb_rule_1, internal_vm, internal_vm_1, internal_vm_2, public_ip, public_vm, should_fail=False):
|
||||
if self.isSimulator:
|
||||
self.debug("Simulator Environment: not running wget traffic tests.")
|
||||
return
|
||||
ssh_client = self.ssh_into_VM(public_vm, public_ip)
|
||||
tries = 0
|
||||
wget_file = None
|
||||
while tries < 120:
|
||||
wget_file = self.wget_from_vm_cmd(
|
||||
ssh_client, int_lb_rule_1.sourceipaddress,
|
||||
self.test_data["http_rule"]["publicport"])
|
||||
if wget_file != "":
|
||||
break
|
||||
self.debug("Waiting for the InternalLbVm in the Internal tier to "
|
||||
"be fully resolved for (wget) traffic test...")
|
||||
time.sleep(5)
|
||||
tries += 1
|
||||
|
||||
# Verifying Internal LB (wget) traffic test
|
||||
if should_fail:
|
||||
with self.assertRaises(Exception):
|
||||
self.verify_lb_wget_file(
|
||||
wget_file, [internal_vm, internal_vm_1, internal_vm_2])
|
||||
self.debug("Failed to wget file as all the VMs in the Internal tier "
|
||||
"are in stopped state")
|
||||
else:
|
||||
self.verify_lb_wget_file(
|
||||
wget_file, [internal_vm, internal_vm_1, internal_vm_2])
|
||||
|
||||
@skip
|
||||
# Skip until CLOUDSTACK-9837 is fixed
|
||||
@attr(tags=["advanced", "nuagevsp"], required_hardware="true")
|
||||
|
|
@ -2452,14 +2429,8 @@ class TestNuageInternalLb(nuageTestCase):
|
|||
self.verify_vsd_firewall_rule(public_ssh_rule)
|
||||
|
||||
# Internal LB (wget) traffic test
|
||||
ssh_client = self.ssh_into_VM(public_vm, public_ip)
|
||||
wget_file = self.wget_from_vm_cmd(
|
||||
ssh_client, int_lb_rule_1.sourceipaddress,
|
||||
self.test_data["http_rule"]["publicport"])
|
||||
|
||||
# Verifying Internal LB (wget) traffic test
|
||||
self.verify_lb_wget_file(
|
||||
wget_file, [internal_vm, internal_vm_1, internal_vm_2])
|
||||
self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm, internal_vm_1,
|
||||
internal_vm_2, public_ip, public_vm)
|
||||
|
||||
# # Stopping the InternalLbVm when the VPC VR is in Stopped state
|
||||
self.stop_InternalLbVm(int_lb_vm)
|
||||
|
|
@ -2478,17 +2449,9 @@ class TestNuageInternalLb(nuageTestCase):
|
|||
self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
|
||||
|
||||
# Internal LB (wget) traffic test
|
||||
ssh_client = self.ssh_into_VM(public_vm, public_ip)
|
||||
wget_file = self.wget_from_vm_cmd(
|
||||
ssh_client, int_lb_rule_1.sourceipaddress,
|
||||
self.test_data["http_rule"]["publicport"])
|
||||
|
||||
# Verifying Internal LB (wget) traffic test
|
||||
with self.assertRaises(Exception):
|
||||
self.verify_lb_wget_file(
|
||||
wget_file, [internal_vm, internal_vm_1, internal_vm_2])
|
||||
self.debug("Failed to wget file as the InternalLbVm is in stopped"
|
||||
" state")
|
||||
self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm, internal_vm_1,
|
||||
internal_vm_2, public_ip, public_vm,
|
||||
should_fail=True)
|
||||
|
||||
# # Starting the InternalLbVm when the VPC VR is in Stopped state
|
||||
self.start_InternalLbVm(int_lb_vm)
|
||||
|
|
@ -2507,23 +2470,9 @@ class TestNuageInternalLb(nuageTestCase):
|
|||
self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
|
||||
|
||||
# Internal LB (wget) traffic test
|
||||
ssh_client = self.ssh_into_VM(public_vm, public_ip)
|
||||
tries = 0
|
||||
while tries < 120:
|
||||
wget_file = self.wget_from_vm_cmd(
|
||||
ssh_client, int_lb_rule_1.sourceipaddress,
|
||||
self.test_data["http_rule"]["publicport"])
|
||||
if wget_file != "":
|
||||
break
|
||||
self.debug("Waiting for the InternalLbVm in the Internal tier to "
|
||||
"be fully resolved for (wget) traffic test...")
|
||||
time.sleep(5)
|
||||
tries += 1
|
||||
|
||||
# Verifying Internal LB (wget) traffic test
|
||||
# Bug CLOUDSTACK-9837
|
||||
self.verify_lb_wget_file(
|
||||
wget_file, [internal_vm, internal_vm_1, internal_vm_2])
|
||||
self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm, internal_vm_1,
|
||||
internal_vm_2, public_ip, public_vm)
|
||||
|
||||
# Starting the VPC VR
|
||||
# VPC VR has no effect on the InternalLbVm functionality
|
||||
|
|
@ -2554,17 +2503,8 @@ class TestNuageInternalLb(nuageTestCase):
|
|||
self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
|
||||
|
||||
# Internal LB (wget) traffic test
|
||||
ssh_client = self.ssh_into_VM(public_vm, public_ip)
|
||||
wget_file = self.wget_from_vm_cmd(
|
||||
ssh_client, int_lb_rule_1.sourceipaddress,
|
||||
self.test_data["http_rule"]["publicport"])
|
||||
|
||||
# Verifying Internal LB (wget) traffic test
|
||||
with self.assertRaises(Exception):
|
||||
self.verify_lb_wget_file(
|
||||
wget_file, [internal_vm, internal_vm_1, internal_vm_2])
|
||||
self.debug("Failed to wget file as the InternalLbVm is in stopped"
|
||||
" state")
|
||||
self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm, internal_vm_1,
|
||||
internal_vm_2, public_ip, public_vm, should_fail=True)
|
||||
|
||||
# # Starting the InternalLbVm when the VPC VR is in Running state
|
||||
self.start_InternalLbVm(int_lb_vm)
|
||||
|
|
@ -2583,22 +2523,8 @@ class TestNuageInternalLb(nuageTestCase):
|
|||
self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
|
||||
|
||||
# Internal LB (wget) traffic test
|
||||
ssh_client = self.ssh_into_VM(public_vm, public_ip)
|
||||
tries = 0
|
||||
while tries < 120:
|
||||
wget_file = self.wget_from_vm_cmd(
|
||||
ssh_client, int_lb_rule_1.sourceipaddress,
|
||||
self.test_data["http_rule"]["publicport"])
|
||||
if wget_file != "":
|
||||
break
|
||||
self.debug("Waiting for the InternalLbVm in the Internal tier to "
|
||||
"be fully resolved for (wget) traffic test...")
|
||||
time.sleep(5)
|
||||
tries += 1
|
||||
|
||||
# Verifying Internal LB (wget) traffic test
|
||||
self.verify_lb_wget_file(
|
||||
wget_file, [internal_vm, internal_vm_1, internal_vm_2])
|
||||
self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm, internal_vm_1,
|
||||
internal_vm_2, public_ip, public_vm)
|
||||
|
||||
# # Force Stopping the InternalLbVm when the VPC VR is in Running state
|
||||
self.stop_InternalLbVm(int_lb_vm, force=True)
|
||||
|
|
@ -2617,17 +2543,9 @@ class TestNuageInternalLb(nuageTestCase):
|
|||
self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
|
||||
|
||||
# Internal LB (wget) traffic test
|
||||
ssh_client = self.ssh_into_VM(public_vm, public_ip)
|
||||
wget_file = self.wget_from_vm_cmd(
|
||||
ssh_client, int_lb_rule_1.sourceipaddress,
|
||||
self.test_data["http_rule"]["publicport"])
|
||||
|
||||
# Verifying Internal LB (wget) traffic test
|
||||
with self.assertRaises(Exception):
|
||||
self.verify_lb_wget_file(
|
||||
wget_file, [internal_vm, internal_vm_1, internal_vm_2])
|
||||
self.debug("Failed to wget file as the InternalLbVm is in stopped"
|
||||
" state")
|
||||
self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm, internal_vm_1,
|
||||
internal_vm_2, public_ip, public_vm,
|
||||
should_fail=True)
|
||||
|
||||
# # Starting the InternalLbVm when the VPC VR is in Running state
|
||||
self.start_InternalLbVm(int_lb_vm)
|
||||
|
|
@ -2646,19 +2564,5 @@ class TestNuageInternalLb(nuageTestCase):
|
|||
self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
|
||||
|
||||
# Internal LB (wget) traffic test
|
||||
ssh_client = self.ssh_into_VM(public_vm, public_ip)
|
||||
tries = 0
|
||||
while tries < 120:
|
||||
wget_file = self.wget_from_vm_cmd(
|
||||
ssh_client, int_lb_rule_1.sourceipaddress,
|
||||
self.test_data["http_rule"]["publicport"])
|
||||
if wget_file != "":
|
||||
break
|
||||
self.debug("Waiting for the InternalLbVm in the Internal tier to "
|
||||
"be fully resolved for (wget) traffic test...")
|
||||
time.sleep(5)
|
||||
tries += 1
|
||||
|
||||
# Verifying Internal LB (wget) traffic test
|
||||
self.verify_lb_wget_file(
|
||||
wget_file, [internal_vm, internal_vm_1, internal_vm_2])
|
||||
self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm, internal_vm_1,
|
||||
internal_vm_2, public_ip, public_vm)
|
||||
|
|
|
|||
|
|
@ -453,6 +453,8 @@ class TestNuageDomainTemplate(nuageTestCase):
|
|||
"(tier) network gets created on CloudStack as the "
|
||||
"associated pre-configured Nuage VSP domain template is no "
|
||||
"longer existing in VSD")
|
||||
for vpc_2_tier in Network.list(self.api_client, vpcid=vpc_2.id):
|
||||
Network(vpc_2_tier.__dict__).delete(self.api_client)
|
||||
|
||||
# Re-creating the associated pre-configured Nuage VSP domain template
|
||||
new_domain_template = self.vsdk.NUDomainTemplate(
|
||||
|
|
@ -504,6 +506,8 @@ class TestNuageDomainTemplate(nuageTestCase):
|
|||
"Network ACLs from CloudStack is not supported when the "
|
||||
"VPC is associated with a Nuage VSP pre-configured domain "
|
||||
"template")
|
||||
for vpc_3_tier in Network.list(self.api_client, vpcid=vpc_3.id):
|
||||
Network(vpc_3_tier.__dict__).delete(self.api_client)
|
||||
|
||||
vpc_3_tier_1 = self.create_Network(
|
||||
self.network_offering, gateway='10.1.2.1', vpc=vpc_3)
|
||||
|
|
|
|||
|
|
@ -98,12 +98,29 @@ class TestNuageManagedSubnets(nuageTestCase):
|
|||
self.cleanup = [self.account]
|
||||
return
|
||||
|
||||
def verify_ping_to_vm(self, src_vm, dst_vm, public_ip, dst_hostname=None):
|
||||
if self.isSimulator:
|
||||
self.debug("Simulator Environment: not verifying pinging")
|
||||
return
|
||||
try:
|
||||
src_vm.ssh_ip = public_ip.ipaddress.ipaddress
|
||||
src_vm.ssh_port = self.test_data["virtual_machine"]["ssh_port"]
|
||||
src_vm.username = self.test_data["virtual_machine"]["username"]
|
||||
src_vm.password = self.test_data["virtual_machine"]["password"]
|
||||
self.debug("SSHing into VM: %s with %s" %
|
||||
(src_vm.ssh_ip, src_vm.password))
|
||||
|
||||
ssh = self.ssh_into_VM(src_vm, public_ip)
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH into VM failed with exception %s" % e)
|
||||
|
||||
self.verify_pingtovmipaddress(ssh, dst_vm.ipaddress)
|
||||
if dst_hostname:
|
||||
self.verify_pingtovmipaddress(ssh, dst_hostname)
|
||||
|
||||
def verify_pingtovmipaddress(self, ssh, pingtovmipaddress):
|
||||
"""verify ping to ipaddress of the vm and retry 3 times"""
|
||||
|
||||
if self.isSimulator:
|
||||
return
|
||||
|
||||
successfull_ping = False
|
||||
nbr_retries = 0
|
||||
max_retries = 5
|
||||
|
|
@ -126,30 +143,6 @@ class TestNuageManagedSubnets(nuageTestCase):
|
|||
if not successfull_ping:
|
||||
self.fail("FAILED TEST as excepted value not found in vm")
|
||||
|
||||
def verify_pingtovmhostname(self, ssh, pingtovmhostname):
|
||||
"""verify ping to hostname of the vm and retry 3 times"""
|
||||
successfull_ping = False
|
||||
nbr_retries = 0
|
||||
max_retries = 5
|
||||
cmd = 'ping -c 2 ' + pingtovmhostname
|
||||
|
||||
while not successfull_ping and nbr_retries < max_retries:
|
||||
self.debug("ping vm by hostname with command: " + cmd)
|
||||
outputlist = ssh.execute(cmd)
|
||||
self.debug("command is executed properly " + cmd)
|
||||
completeoutput = str(outputlist).strip('[]')
|
||||
self.debug("complete output is " + completeoutput)
|
||||
if '2 received' in completeoutput:
|
||||
self.debug("PASS as vm is pingeable: " + completeoutput)
|
||||
successfull_ping = True
|
||||
else:
|
||||
self.debug("FAIL as vm is not pingeable: " + completeoutput)
|
||||
time.sleep(3)
|
||||
nbr_retries = nbr_retries + 1
|
||||
|
||||
if not successfull_ping:
|
||||
self.fail("FAILED TEST as excepted value not found in vm")
|
||||
|
||||
# verify_vsd_vm - Verifies the given CloudStack VM deployment and status in
|
||||
# VSD
|
||||
def verify_vsdmngd_vm(self, vm, vsdmngd_subnet, stopped=False):
|
||||
|
|
@ -207,7 +200,6 @@ class TestNuageManagedSubnets(nuageTestCase):
|
|||
zone1 = self.create_vsd_zone(domain1, "ZoneToBeConsumedByACS")
|
||||
subnet1 = self.create_vsd_subnet(zone1, "SubnetToBeConsumedByACS",
|
||||
"10.0.0.1/24")
|
||||
self.create_vsd_dhcp_option(subnet1, 15, ["nuagenetworks1.net"])
|
||||
|
||||
domain2 = self.create_vsd_domain(domain_template, enterprise,
|
||||
"2ndL3DomainToBeConsumedByACS")
|
||||
|
|
@ -227,13 +219,13 @@ class TestNuageManagedSubnets(nuageTestCase):
|
|||
isolated_network = self.create_Network(
|
||||
self.nuage_isolated_network_offering,
|
||||
gateway="10.0.0.1", netmask="255.255.255.0",
|
||||
externalid=subnet1.id)
|
||||
externalid=subnet1.id, cleanup=False)
|
||||
|
||||
# On ACS create network using persistent nw offering allow
|
||||
isolated_network2 = self.create_Network(
|
||||
self.nuage_isolated_network_offering_persistent,
|
||||
gateway="10.5.0.1", netmask="255.255.255.0",
|
||||
externalid=subnet2.id)
|
||||
externalid=subnet2.id, cleanup=False)
|
||||
|
||||
with self.assertRaises(Exception):
|
||||
self.create_Network(
|
||||
|
|
@ -255,11 +247,11 @@ class TestNuageManagedSubnets(nuageTestCase):
|
|||
externalid=subnet2.id+1)
|
||||
|
||||
# verify floating ip and intra subnet connectivity
|
||||
vm_1 = self.create_VM(isolated_network)
|
||||
vm_1 = self.create_VM(isolated_network, cleanup=False)
|
||||
|
||||
self.test_data["virtual_machine"]["displayname"] = "vm2"
|
||||
self.test_data["virtual_machine"]["name"] = "vm2"
|
||||
vm_2 = self.create_VM(isolated_network)
|
||||
vm_2 = self.create_VM(isolated_network, cleanup=False)
|
||||
self.test_data["virtual_machine"]["displayname"] = None
|
||||
self.test_data["virtual_machine"]["name"] = None
|
||||
|
||||
|
|
@ -276,31 +268,12 @@ class TestNuageManagedSubnets(nuageTestCase):
|
|||
public_ip, isolated_network, static_nat=True, vm=vm_1)
|
||||
self.create_FirewallRule(public_ip,
|
||||
self.test_data["ingress_rule"])
|
||||
if not self.isSimulator:
|
||||
vm_public_ip = public_ip.ipaddress.ipaddress
|
||||
try:
|
||||
vm_1.ssh_ip = vm_public_ip
|
||||
vm_1.ssh_port = \
|
||||
self.test_data["virtual_machine"]["ssh_port"]
|
||||
vm_1.username = \
|
||||
self.test_data["virtual_machine"]["username"]
|
||||
vm_1.password = \
|
||||
self.test_data["virtual_machine"]["password"]
|
||||
self.debug("SSHing into VM: %s with %s" %
|
||||
(vm_1.ssh_ip, vm_1.password))
|
||||
self.verify_ping_to_vm(vm_1, vm_2, public_ip, "vm2")
|
||||
|
||||
ssh = vm_1.get_ssh_client(ipaddress=vm_public_ip)
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH into VM failed with exception %s" % e)
|
||||
|
||||
self.verify_pingtovmipaddress(ssh, vm_2.ipaddress)
|
||||
self.verify_pingtovmhostname(ssh, "vm2")
|
||||
|
||||
vm_3 = self.create_VM(isolated_network2)
|
||||
vm_3 = self.create_VM(isolated_network2, cleanup=False)
|
||||
self.test_data["virtual_machine"]["displayname"] = "vm4"
|
||||
self.test_data["virtual_machine"]["name"] = "vm4"
|
||||
vm_4 = self.create_VM(isolated_network2)
|
||||
vm_4 = self.create_VM(isolated_network2, cleanup=False)
|
||||
self.test_data["virtual_machine"]["displayname"] = None
|
||||
self.test_data["virtual_machine"]["name"] = None
|
||||
self.verify_vsd_network_not_present(isolated_network2)
|
||||
|
|
@ -313,37 +286,17 @@ class TestNuageManagedSubnets(nuageTestCase):
|
|||
self.create_StaticNatRule_For_VM(vm_3, public_ip2,
|
||||
isolated_network2)
|
||||
self.validate_PublicIPAddress(
|
||||
public_ip2, isolated_network2, static_nat=True, vm=vm_3)
|
||||
public_ip2, isolated_network2, static_nat=True, vm=vm_3)
|
||||
self.create_FirewallRule(public_ip2,
|
||||
self.test_data["ingress_rule"])
|
||||
|
||||
if not self.isSimulator:
|
||||
vm_public_ip2 = public_ip2.ipaddress.ipaddress
|
||||
try:
|
||||
vm_3.ssh_ip = vm_public_ip2
|
||||
vm_3.ssh_port = \
|
||||
self.test_data["virtual_machine"]["ssh_port"]
|
||||
vm_3.username = \
|
||||
self.test_data["virtual_machine"]["username"]
|
||||
vm_3.password = \
|
||||
self.test_data["virtual_machine"]["password"]
|
||||
self.debug("SSHing into VM: %s with %s" %
|
||||
(vm_3.ssh_ip, vm_3.password))
|
||||
|
||||
ssh2 = vm_3.get_ssh_client(ipaddress=vm_public_ip2)
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH into VM failed with exception %s" % e)
|
||||
|
||||
self.verify_pingtovmipaddress(ssh2, vm_4.ipaddress)
|
||||
self.verify_pingtovmhostname(ssh2, "vm4")
|
||||
|
||||
vm_1.delete(self.api_client, expunge=True)
|
||||
vm_2.delete(self.api_client, expunge=True)
|
||||
isolated_network.delete(self.api_client)
|
||||
vm_3.delete(self.api_client, expunge=True)
|
||||
self.verify_ping_to_vm(vm_3, vm_4, public_ip2)
|
||||
vm_4.delete(self.api_client, expunge=True)
|
||||
vm_3.delete(self.api_client, expunge=True)
|
||||
vm_2.delete(self.api_client, expunge=True)
|
||||
vm_1.delete(self.api_client, expunge=True)
|
||||
isolated_network2.delete(self.api_client)
|
||||
isolated_network.delete(self.api_client)
|
||||
self.debug("Number of loops %s" % i)
|
||||
|
||||
@attr(tags=["advanced", "nuagevsp", "vpc"], required_hardware="false")
|
||||
|
|
@ -393,11 +346,11 @@ class TestNuageManagedSubnets(nuageTestCase):
|
|||
vpc = self.create_Vpc(self.nuage_vpc_offering, cidr='10.1.0.0/16')
|
||||
self.validate_Vpc(vpc, state="Enabled")
|
||||
acl_list = self.create_NetworkAclList(
|
||||
name="acl", description="acl", vpc=vpc)
|
||||
name="acl", description="acl", vpc=vpc)
|
||||
self.create_NetworkAclRule(
|
||||
self.test_data["ingress_rule"], acl_list=acl_list)
|
||||
self.test_data["ingress_rule"], acl_list=acl_list)
|
||||
self.create_NetworkAclRule(
|
||||
self.test_data["icmprule"], acl_list=acl_list)
|
||||
self.test_data["icmprule"], acl_list=acl_list)
|
||||
|
||||
self.debug("Creating another VPC with Static NAT service provider "
|
||||
"as VpcVirtualRouter")
|
||||
|
|
@ -442,7 +395,8 @@ class TestNuageManagedSubnets(nuageTestCase):
|
|||
gateway='10.1.0.1',
|
||||
vpc=vpc,
|
||||
acl_list=acl_list,
|
||||
externalid=subnet1.id)
|
||||
externalid=subnet1.id,
|
||||
cleanup=False)
|
||||
self.validate_Network(vpc_tier, state="Implemented")
|
||||
self.debug("Creating 2nd VPC tier network with Static NAT service")
|
||||
|
||||
|
|
@ -458,7 +412,8 @@ class TestNuageManagedSubnets(nuageTestCase):
|
|||
gateway='10.1.128.1',
|
||||
vpc=vpc,
|
||||
acl_list=acl_list,
|
||||
externalid=subnet2.id)
|
||||
externalid=subnet2.id,
|
||||
cleanup=False)
|
||||
self.validate_Network(vpc_2ndtier, state="Implemented")
|
||||
vpc_vr = self.get_Router(vpc_tier)
|
||||
self.check_Router_state(vpc_vr, state="Running")
|
||||
|
|
@ -514,17 +469,17 @@ class TestNuageManagedSubnets(nuageTestCase):
|
|||
self.debug("Deploying a VM in the created VPC tier network")
|
||||
self.test_data["virtual_machine"]["displayname"] = "vpcvm1"
|
||||
self.test_data["virtual_machine"]["name"] = "vpcvm1"
|
||||
vpc_vm_1 = self.create_VM(vpc_tier)
|
||||
vpc_vm_1 = self.create_VM(vpc_tier, cleanup=False)
|
||||
self.check_VM_state(vpc_vm_1, state="Running")
|
||||
self.debug("Deploying another VM in the created VPC tier network")
|
||||
self.test_data["virtual_machine"]["displayname"] = "vpcvm2"
|
||||
self.test_data["virtual_machine"]["name"] = "vpcvm2"
|
||||
vpc_vm_2 = self.create_VM(vpc_tier)
|
||||
vpc_vm_2 = self.create_VM(vpc_tier, cleanup=False)
|
||||
self.check_VM_state(vpc_vm_2, state="Running")
|
||||
self.debug("Deploying a VM in the 2nd VPC tier network")
|
||||
self.test_data["virtual_machine"]["displayname"] = "vpcvm12"
|
||||
self.test_data["virtual_machine"]["name"] = "vpcvm12"
|
||||
vpc_vm_12 = self.create_VM(vpc_2ndtier)
|
||||
vpc_vm_12 = self.create_VM(vpc_2ndtier, cleanup=False)
|
||||
self.check_VM_state(vpc_vm_2, state="Running")
|
||||
self.test_data["virtual_machine"]["displayname"] = None
|
||||
self.test_data["virtual_machine"]["name"] = None
|
||||
|
|
@ -542,26 +497,8 @@ class TestNuageManagedSubnets(nuageTestCase):
|
|||
self.validate_PublicIPAddress(
|
||||
public_ip_1, vpc_tier, static_nat=True, vm=vpc_vm_1)
|
||||
|
||||
if not self.isSimulator:
|
||||
vm_public_ip_1 = public_ip_1.ipaddress.ipaddress
|
||||
try:
|
||||
vpc_vm_1.ssh_ip = vm_public_ip_1
|
||||
vpc_vm_1.ssh_port = \
|
||||
self.test_data["virtual_machine"]["ssh_port"]
|
||||
vpc_vm_1.username = \
|
||||
self.test_data["virtual_machine"]["username"]
|
||||
vpc_vm_1.password = \
|
||||
self.test_data["virtual_machine"]["password"]
|
||||
self.debug("SSHing into VM: %s with %s" %
|
||||
(vpc_vm_1.ssh_ip, vpc_vm_1.password))
|
||||
|
||||
ssh = vpc_vm_1.get_ssh_client(ipaddress=vm_public_ip_1)
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH into VM failed with exception %s" % e)
|
||||
|
||||
self.verify_pingtovmipaddress(ssh, vpc_vm_2.ipaddress)
|
||||
self.verify_pingtovmipaddress(ssh, vpc_vm_12.ipaddress)
|
||||
self.verify_ping_to_vm(vpc_vm_1, vpc_vm_2, public_ip_1)
|
||||
self.verify_ping_to_vm(vpc_vm_1, vpc_vm_12, public_ip_1)
|
||||
|
||||
vpc_vm_1.delete(self.api_client, expunge=True)
|
||||
vpc_vm_2.delete(self.api_client, expunge=True)
|
||||
|
|
@ -704,8 +641,6 @@ class TestNuageManagedSubnets(nuageTestCase):
|
|||
enterprise = self.vsdk.NUEnterprise()
|
||||
enterprise.name = "EnterpriseToBeConsumedByACS"
|
||||
enterprise.description = "EnterpriseToBeConsumedByACS"
|
||||
# enterprise.external_id = "ToBeConsumedByACS@" \
|
||||
# + str(self.cms_id)
|
||||
(enterprise, connection) = self._session.user.create_child(enterprise)
|
||||
return enterprise
|
||||
|
||||
|
|
@ -738,8 +673,6 @@ class TestNuageManagedSubnets(nuageTestCase):
|
|||
domain_template = self.vsdk.NUDomainTemplate()
|
||||
domain_template.name = "L3DomainTemplateToBeConsumedByACS"
|
||||
domain_template.description = "L3DomainTemplateToBeConsumedByACS"
|
||||
# domain_template.external_id = "L3DomainTemplateToBeConsumedByACS@" \
|
||||
# + str(self.cms_id)
|
||||
(domain_template, connection) = \
|
||||
enterprise.create_child(domain_template)
|
||||
return domain_template
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load Diff
|
|
@ -57,7 +57,7 @@ setup(name="Marvin",
|
|||
"ipmisim >= 0.7"
|
||||
],
|
||||
extras_require={
|
||||
"nuagevsp": ["libVSD", "PyYAML", "futures", "netaddr", "retries", "jpype1"]
|
||||
"nuagevsp": ["vspk", "PyYAML", "futures", "netaddr", "retries", "jpype1"]
|
||||
},
|
||||
py_modules=['marvin.marvinPlugin'],
|
||||
zip_safe=False,
|
||||
|
|
|
|||
Loading…
Reference in New Issue