mirror of https://github.com/apache/cloudstack.git

commit 869a51afa2: Merge branch 'main' into nsx-integration
@@ -15,7 +15,7 @@ was tested against a CentOS 7 x86_64 setup.
 Install tools and dependencies used for development:
 
-    # yum -y install git java-11-openjdk java-11-openjdk-devel \
+    # yum -y install git java-17-openjdk java-17-openjdk-devel \
       mysql mysql-server mkisofs git gcc python MySQL-python openssh-clients wget
 
 Set up Maven (3.6.0):
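The Maven step above is cut off at the hunk boundary; a minimal sketch of it, assuming a manual tarball install on CentOS 7 (mirror URL and archive name are illustrative, not taken from the commit):

    # wget https://archive.apache.org/dist/maven/maven-3/3.6.0/binaries/apache-maven-3.6.0-bin.tar.gz
    # tar -xzf apache-maven-3.6.0-bin.tar.gz -C /usr/local
    # export PATH=/usr/local/apache-maven-3.6.0/bin:$PATH
    # mvn -version

The last command should report Maven 3.6.x together with the java-17-openjdk installed above.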
@@ -17,14 +17,14 @@ Description: A common package which contains files which are shared by several C
 Package: cloudstack-management
 Architecture: all
-Depends: ${python3:Depends}, openjdk-11-jre-headless | java11-runtime-headless | java11-runtime | openjdk-11-jre-headless | zulu-11, cloudstack-common (= ${source:Version}), net-tools, sudo, python3-mysql.connector, augeas-tools, mysql-client | mariadb-client, adduser, bzip2, ipmitool, file, gawk, iproute2, qemu-utils, rng-tools, python3-dnspython, lsb-release, init-system-helpers (>= 1.14~), python3-setuptools
+Depends: ${python3:Depends}, openjdk-17-jre-headless | java17-runtime-headless | java17-runtime | zulu-17, cloudstack-common (= ${source:Version}), net-tools, sudo, python3-mysql.connector, augeas-tools, mysql-client | mariadb-client, adduser, bzip2, ipmitool, file, gawk, iproute2, qemu-utils, rng-tools, python3-dnspython, lsb-release, init-system-helpers (>= 1.14~), python3-setuptools
 Conflicts: cloud-server, cloud-client, cloud-client-ui
 Description: CloudStack server library
  The CloudStack management server
 
 Package: cloudstack-agent
 Architecture: all
-Depends: ${python:Depends}, ${python3:Depends}, openjdk-11-jre-headless | java11-runtime-headless | java11-runtime | openjdk-11-jre-headless | zulu-11, cloudstack-common (= ${source:Version}), lsb-base (>= 9), openssh-client, qemu-kvm (>= 2.5) | qemu-system-x86 (>= 5.2), libvirt-bin (>= 1.3) | libvirt-daemon-system (>= 3.0), iproute2, ebtables, vlan, ipset, python3-libvirt, ethtool, iptables, cryptsetup, rng-tools, lsb-release, ufw, apparmor
+Depends: ${python:Depends}, ${python3:Depends}, openjdk-17-jre-headless | java17-runtime-headless | java17-runtime | zulu-17, cloudstack-common (= ${source:Version}), lsb-base (>= 9), openssh-client, qemu-kvm (>= 2.5) | qemu-system-x86 (>= 5.2), libvirt-bin (>= 1.3) | libvirt-daemon-system (>= 3.0), iproute2, ebtables, vlan, ipset, python3-libvirt, ethtool, iptables, cryptsetup, rng-tools, lsb-release, ufw, apparmor
 Recommends: init-system-helpers
 Conflicts: cloud-agent, cloud-agent-libs, cloud-agent-deps, cloud-agent-scripts
 Description: CloudStack agent
@@ -34,7 +34,7 @@ Description: CloudStack agent
 Package: cloudstack-usage
 Architecture: all
-Depends: openjdk-11-jre-headless | java11-runtime-headless | java11-runtime | openjdk-11-jre-headless | zulu-11, cloudstack-common (= ${source:Version}), init-system-helpers
+Depends: openjdk-17-jre-headless | java17-runtime-headless | java17-runtime | zulu-17, cloudstack-common (= ${source:Version}), init-system-helpers
 Description: CloudStack usage monitor
  The CloudStack usage monitor provides usage accounting across the entire cloud for
  cloud operators to charge based on usage parameters.
@@ -71,7 +71,7 @@
     <dependency>
       <groupId>com.sun.xml.bind</groupId>
       <artifactId>jaxb-impl</artifactId>
-      <version>${cs.jaxb.version}</version>
+      <version>${cs.jaxb.impl.version}</version>
     </dependency>
   </dependencies>
 </project>
@@ -44,6 +44,10 @@ public interface PrimaryDataStoreDriver extends DataStoreDriver {
     void revokeAccess(DataObject dataObject, Host host, DataStore dataStore);
 
+    default boolean requiresAccessForMigration(DataObject dataObject) {
+        return false;
+    }
+
     /**
      * intended for managed storage (cloud.storage_pool.managed = true)
      * if not managed, return volume.getSize()
@@ -57,6 +57,8 @@ public interface VolumeService {
     void revokeAccess(DataObject dataObject, Host host, DataStore dataStore);
 
+    boolean requiresAccessForMigration(DataObject dataObject, DataStore dataStore);
+
     /**
      * Creates the volume based on the given criteria
      *
@@ -1230,8 +1230,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
         DataStore dataStore = dataStoreMgr.getDataStore(volumeForVm.getPoolId(), DataStoreRole.Primary);
         PrimaryDataStore primaryDataStore = (PrimaryDataStore)dataStore;
 
-        // This might impact other managed storages, grant access for PowerFlex storage pool only
-        if (primaryDataStore.isManaged() && primaryDataStore.getPoolType() == Storage.StoragePoolType.PowerFlex) {
+        // This might impact other managed storages, enable requires access for migration in relevant datastore driver (currently enabled for PowerFlex storage pool only)
+        if (primaryDataStore.isManaged() && volService.requiresAccessForMigration(volumeInfo, dataStore)) {
             volService.revokeAccess(volumeInfo, host, dataStore);
         }
     }
@@ -1509,8 +1509,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
         disk.setDetails(getDetails(volumeInfo, dataStore));
 
         PrimaryDataStore primaryDataStore = (PrimaryDataStore)dataStore;
-        // This might impact other managed storages, grant access for PowerFlex storage pool only
-        if (primaryDataStore.isManaged() && primaryDataStore.getPoolType() == Storage.StoragePoolType.PowerFlex) {
+        // This might impact other managed storages, enable requires access for migration in relevant datastore driver (currently enabled for PowerFlex storage pool only)
+        if (primaryDataStore.isManaged() && volService.requiresAccessForMigration(volumeInfo, dataStore)) {
             volService.grantAccess(volFactory.getVolume(vol.getId()), dest.getHost(), dataStore);
         }
 
@@ -101,8 +101,10 @@
         for (template in templateList) {
             def data = lines.findAll { it.contains(template) }
-            if (data != null) {
-                def hypervisor = template.tokenize('-')[-1]
-                pom.properties["$hypervisor" + ".checksum"] = data[0].tokenize(' ')[0]
+            if (data.size() > 0) {
+                def hypervisor = template.tokenize('-')[-1]
+                pom.properties["$hypervisor" + ".checksum"] = data[0].tokenize(' ')[0]
+            }
             }
         }
       </source>
@@ -29,6 +29,12 @@ DROP INDEX `i_resource_count__type_domaintId`,
 ADD UNIQUE INDEX `i_resource_count__type_tag_accountId` (`type`,`tag`,`account_id`),
 ADD UNIQUE INDEX `i_resource_count__type_tag_domaintId` (`type`,`tag`,`domain_id`);
 
+-- Update Default System offering for Router to 512MiB
+UPDATE `cloud`.`service_offering` SET ram_size = 512 WHERE unique_name IN ("Cloud.Com-SoftwareRouter", "Cloud.Com-SoftwareRouter-Local",
+    "Cloud.Com-InternalLBVm", "Cloud.Com-InternalLBVm-Local",
+    "Cloud.Com-ElasticLBVm", "Cloud.Com-ElasticLBVm-Local")
+    AND system_use = 1 AND ram_size < 512;
+
 -- NSX Plugin --
 CREATE TABLE `cloud`.`nsx_providers` (
     `id` bigint unsigned NOT NULL auto_increment COMMENT 'id',
@@ -18,6 +18,7 @@
 -- VIEW `cloud`.`domain_view`;
 
 DROP VIEW IF EXISTS `cloud`.`domain_view`;
 
 CREATE VIEW `cloud`.`domain_view` AS
 select
     `domain`.`id` AS `id`,
@@ -268,6 +268,19 @@ public class VolumeServiceImpl implements VolumeService {
         }
     }
 
+    @Override
+    public boolean requiresAccessForMigration(DataObject dataObject, DataStore dataStore) {
+        DataStoreDriver dataStoreDriver = dataStore != null ? dataStore.getDriver() : null;
+        if (dataStoreDriver == null) {
+            return false;
+        }
+
+        if (dataStoreDriver instanceof PrimaryDataStoreDriver) {
+            return ((PrimaryDataStoreDriver)dataStoreDriver).requiresAccessForMigration(dataObject);
+        }
+        return false;
+    }
+
     @Override
     public AsyncCallFuture<VolumeApiResult> createVolumeAsync(VolumeInfo volume, DataStore dataStore) {
         AsyncCallFuture<VolumeApiResult> future = new AsyncCallFuture<VolumeApiResult>();
@@ -68,7 +68,7 @@
     <dependency>
       <groupId>com.sun.xml.bind</groupId>
      <artifactId>jaxb-impl</artifactId>
-      <version>${cs.jaxb.version}</version>
+      <version>${cs.jaxb.impl.version}</version>
     </dependency>
     <dependency>
       <groupId>org.apache.cxf</groupId>
@@ -52,7 +52,7 @@ intelligent IaaS cloud implementation.
 %package management
 Summary: CloudStack management server UI
-Requires: java-11-openjdk
+Requires: java-17-openjdk
 Requires: (tzdata-java or timezone-java)
 Requires: python3
 Requires: bash
@@ -98,7 +98,7 @@ The Apache CloudStack files shared between agent and management server
 %package agent
 Summary: CloudStack Agent for KVM hypervisors
 Requires: (openssh-clients or openssh)
-Requires: java-11-openjdk
+Requires: java-17-openjdk
 Requires: tzdata-java
 Requires: %{name}-common = %{_ver}
 Requires: libvirt
@@ -135,7 +135,7 @@ The CloudStack baremetal agent
 %package usage
 Summary: CloudStack Usage calculation server
-Requires: java-11-openjdk
+Requires: java-17-openjdk
 Requires: tzdata-java
 Group: System Environment/Libraries
 %description usage
@@ -556,8 +556,8 @@ if [ -f "/usr/share/cloudstack-common/scripts/installer/cloudstack-help-text" ];
 fi
 
 %post marvin
-pip install --upgrade https://files.pythonhosted.org/packages/08/1f/42d74bae9dd6dcfec67c9ed0f3fa482b1ae5ac5f117ca82ab589ecb3ca19/mysql_connector_python-8.0.31-py2.py3-none-any.whl
-pip install --upgrade /usr/share/cloudstack-marvin/Marvin-*.tar.gz
+pip3 install --upgrade https://files.pythonhosted.org/packages/08/1f/42d74bae9dd6dcfec67c9ed0f3fa482b1ae5ac5f117ca82ab589ecb3ca19/mysql_connector_python-8.0.31-py2.py3-none-any.whl
+pip3 install --upgrade /usr/share/cloudstack-marvin/Marvin-*.tar.gz
 
 #No default permission as the permission setup is complex
 %files management
@@ -15,7 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 
-JAVA_OPTS="-Djava.security.properties=/etc/cloudstack/management/java.security.ciphers -Djava.awt.headless=true -Dcom.sun.management.jmxremote=false -Xmx2G -XX:+UseParallelGC -XX:MaxGCPauseMillis=500 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/var/log/cloudstack/management/ -XX:ErrorFile=/var/log/cloudstack/management/cloudstack-management.err "
+JAVA_OPTS="-Djava.security.properties=/etc/cloudstack/management/java.security.ciphers -Djava.awt.headless=true -Dcom.sun.management.jmxremote=false -Xmx2G -XX:+UseParallelGC -XX:MaxGCPauseMillis=500 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/var/log/cloudstack/management/ -XX:ErrorFile=/var/log/cloudstack/management/cloudstack-management.err --add-opens=java.base/java.lang=ALL-UNNAMED --add-exports=java.base/sun.security.x509=ALL-UNNAMED"
 
 CLASSPATH="/usr/share/cloudstack-management/lib/*:/etc/cloudstack/management:/usr/share/cloudstack-common:/usr/share/cloudstack-management/setup:/usr/share/cloudstack-management:/usr/share/java/mysql-connector-java.jar:/usr/share/cloudstack-mysql-ha/lib/*"
@@ -24,7 +24,7 @@ BOOTSTRAP_CLASS=org.apache.cloudstack.ServerDaemon
 ################################################################################################
 #You can uncomment one of these options if you want to enable Java remote debugging. #
 #You can change the parameters at your will. The 'address' field defines the port to be used. #
-################################################################################################
+################################################################################################
 # This option here should be used with 'systemmd' based operating systems such as CentOS7, Ubuntu 16, and so on.
 #JAVA_DEBUG="-agentlib:jdwp=transport=dt_socket,address=*:8000,server=y,suspend=n"
 
@@ -15,7 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 
-JAVA_OPTS="-Xms256m -Xmx2048m"
+JAVA_OPTS="-Xms256m -Xmx2048m --add-opens=java.base/java.lang=ALL-UNNAMED"
 
 CLASSPATH="/usr/share/cloudstack-usage/*:/usr/share/cloudstack-usage/lib/*:/usr/share/cloudstack-mysql-ha/lib/*:/etc/cloudstack/usage:/usr/share/java/mysql-connector-java.jar"
@@ -45,7 +45,7 @@
     <dependency>
       <groupId>com.sun.xml.bind</groupId>
       <artifactId>jaxb-impl</artifactId>
-      <version>${cs.jaxb.version}</version>
+      <version>${cs.jaxb.impl.version}</version>
     </dependency>
   </dependencies>
 </project>
@@ -77,6 +77,7 @@ public class LibvirtPatchSystemVmCommandWrapper extends CommandWrapper<PatchSyst
         if (patchResult.first()) {
             String scriptVersion = lines[1];
             if (StringUtils.isNotEmpty(patchResult.second())) {
+                logger.debug("Patch result of systemVM {}: {}", sysVMName, patchResult.second());
                 String res = patchResult.second().replace("\n", " ");
                 String[] output = res.split(":");
                 if (output.length != 2) {
@@ -102,7 +102,9 @@ public class KubernetesVersionManagerImpl extends ManagerBase implements Kuberne
         if (template != null) {
             response.setIsoId(template.getUuid());
             response.setIsoName(template.getName());
-            response.setIsoState(template.getState().toString());
+            if (template.getState() != null) {
+                response.setIsoState(template.getState().toString());
+            }
             response.setDirectDownload(template.isDirectDownload());
         }
         response.setCreated(kubernetesSupportedVersion.getCreated());
@@ -21,14 +21,14 @@ function usage() {
 Usage: ./deploy-cloudstack-secret [OPTIONS]...
 To deploy the keys needed for the cloudstack kubernetes provider.
 Arguments:
--u, --url string ID of the cluster
+-u, --url string URL of the CloudStack API
 -k, --key string API Key
 -s, --secret string Secret Key
 -p, --project string Project ID
 Other arguments:
 -h, --help Display this help message and exit
 Examples:
-./deploy-cloudstack-secret -u http://localhost:8080 -k abcd -s efgh
+./deploy-cloudstack-secret -u http://10.10.10.10:8080/client/api -k abcd -s efgh
 USAGE
 exit 0
 }
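The help text above documents a -p/--project flag that the built-in examples do not exercise; a hypothetical invocation for a project-scoped cluster (the project ID is illustrative only):

    ./deploy-cloudstack-secret -u http://10.10.10.10:8080/client/api -k abcd -s efgh -p a1b2c3d4-5678-90ab-cdef-1234567890ab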
@@ -32,7 +32,7 @@ import com.cloud.vm.VirtualMachineProfile.Param;
 
 public interface InternalLoadBalancerVMManager {
     //RAM/CPU for the system offering used by Internal LB VMs
-    public static final int DEFAULT_INTERNALLB_VM_RAMSIZE = 256; // 256 MB
+    public static final int DEFAULT_INTERNALLB_VM_RAMSIZE = 512; // 512 MB
     public static final int DEFAULT_INTERNALLB_VM_CPU_MHZ = 256; // 256 MHz
 
     /**
@@ -304,6 +304,11 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
         return null;
     }
 
+    @Override
+    public boolean requiresAccessForMigration(DataObject dataObject) {
+        return true;
+    }
+
     @Override
     public long getUsedBytes(StoragePool storagePool) {
         long usedSpaceBytes = 0;
pom.xml
@@ -50,7 +50,7 @@
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
     <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
     <project.systemvm.template.location>https://download.cloudstack.org/systemvm</project.systemvm.template.location>
-    <project.systemvm.template.version>4.19.0.0</project.systemvm.template.version>
+    <project.systemvm.template.version>4.20.0.0</project.systemvm.template.version>
     <sonar.organization>apache</sonar.organization>
     <sonar.host.url>https://sonarcloud.io</sonar.host.url>
 
@@ -151,7 +151,9 @@
     <cs.maven-javadoc-plugin.version>3.1.1</cs.maven-javadoc-plugin.version>
     <cs.javax.annotation.version>1.3.2</cs.javax.annotation.version>
     <cs.jaxb.version>2.3.0</cs.jaxb.version>
-    <cs.jaxws.version>2.3.2-1</cs.jaxws.version>
+    <cs.jaxb.impl.version>2.3.9</cs.jaxb.impl.version>
+    <cs.jakarta.xml.bind.version>2.3.3</cs.jakarta.xml.bind.version>
+    <cs.jaxws.version>2.3.7</cs.jaxws.version>
     <cs.jersey-client.version>2.26</cs.jersey-client.version>
     <cs.jetty.version>9.4.51.v20230217</cs.jetty.version>
     <cs.jetty-maven-plugin.version>9.4.27.v20200227</cs.jetty-maven-plugin.version>
@@ -1040,6 +1042,7 @@
       <exclude>systemvm/agent/js/jquery.js</exclude>
       <exclude>systemvm/agent/js/jquery.flot.navigate.js</exclude>
       <exclude>systemvm/agent/noVNC/**</exclude>
+      <exclude>systemvm/agent/packages/**</exclude>
       <exclude>systemvm/debian/**</exclude>
       <exclude>test/integration/component/test_host_ha.sh</exclude>
       <exclude>test/systemvm/README.md</exclude>
@@ -40,4 +40,10 @@ printf " * Release notes: https://docs.cloudstack.apache.org/en/${ACL_MINO
 printf " * Join mailing lists: https://cloudstack.apache.org/mailing-lists.html\n"
 printf " * Take the survey: https://cloudstack.apache.org/survey.html\n"
 printf " * Report issues: https://github.com/apache/cloudstack/issues/new\n"
+
+if [ "$1" = "management" ];then
+  printf "\nSince Apache CloudStack 4.20.0.0, the System VMs and virtual routers require at least 512 MiB memory, please check the System Offerings."
+  printf "\nMore information can be found at https://docs.cloudstack.apache.org/en/${ACL_MINOR_VERSION:-latest}/upgrading/upgrade/_sysvm_restart.html\n"
+fi
+
 printf "\n"
@@ -335,7 +335,7 @@ def get_container(url, token, container, marker=None, limit=None,
     :param marker: marker query
     :param limit: limit query
     :param prefix: prefix query
-    :param delimeter: string to delimit the queries on
+    :param delimiter: string to delimit the queries on
     :param http_conn: HTTP connection object (If None, it will create the
                       conn object)
     :param full_listing: if True, return a full listing, else returns a max
@@ -337,7 +337,7 @@ def get_container(url, token, container, marker=None, limit=None,
     :param marker: marker query
     :param limit: limit query
     :param prefix: prefix query
-    :param delimeter: string to delimit the queries on
+    :param delimiter: string to delimit the queries on
     :param http_conn: HTTP connection object (If None, it will create the
                       conn object)
     :param full_listing: if True, return a full listing, else returns a max
@@ -959,7 +959,7 @@ public enum Config {
             ManagementServer.class,
             Integer.class,
             "network.loadbalancer.basiczone.elb.vm.ram.size",
-            "128",
+            "512",
             "Memory in MB for the elastic load balancer vm",
             null),
     ElasticLoadBalancerVmCpuMhz(
@@ -1291,7 +1291,7 @@ public enum Config {
             "The allowable clock difference in milliseconds between when an SSO login request is made and when it is received.",
             null),
     //NetworkType("Hidden", ManagementServer.class, String.class, "network.type", "vlan", "The type of network that this deployment will use.", "vlan,direct"),
-    RouterRamSize("Hidden", NetworkOrchestrationService.class, Integer.class, "router.ram.size", "256", "Default RAM for router VM (in MB).", null),
+    RouterRamSize("Hidden", NetworkOrchestrationService.class, Integer.class, "router.ram.size", "512", "Default RAM for router VM (in MB).", null),
 
     DefaultPageSize("Advanced", ManagementServer.class, Long.class, "default.page.size", "500", "Default page size for API list* commands", null),
 
@@ -3334,7 +3334,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C
         if (NetUtils.isNetworkAWithinNetworkB(network.getCidr(), network.getNetworkCidr())) {
             logger.warn(
                     "Existing IP reservation will become ineffective for the network with id = " + networkId + " You need to reapply reservation after network reimplementation.");
-            //set cidr to the newtork cidr
+            //set cidr to the network cidr
             network.setCidr(network.getNetworkCidr());
             //set networkCidr to null to bring network back to no IP reservation state
             network.setNetworkCidr(null);
@@ -95,7 +95,7 @@ def generate_js_file(keymap_file):
     js_config.append(" * layout : %s\n" % layout)
     js_config.append(" */\n")
     js_config.append("export default {\n")
-    for keycode in dict(sorted(result_mappings.items(), key=lambda item: int(item[0]))):
+    for keycode in dict(sorted(list(result_mappings.items()), key=lambda item: int(item[0]))):
         js_config.append("%10s : \"%s\",\n" % ("\"" + str(keycode) + "\"", result_mappings[keycode].strip()))
     js_config.append("}\n")
     for line in js_config:
@@ -0,0 +1,11 @@
+[python-is-python3]
+debian_os=11
+package_name=python-is-python3
+file_name=python-is-python3_3.9.2-1_all.deb
+conflicted_packages=python-is-python2
+
+[python3-netaddr]
+debian_os=11
+package_name=python3-netaddr
+file_name=python3-netaddr_0.7.19-5_all.deb
+conflicted_packages=
Binary file not shown.
Binary file not shown.
@ -93,7 +93,7 @@
|
|||
# Enable/Disable SSL for this virtual host.
|
||||
SSLEngine on
|
||||
SSLProtocol TLSv1.2
|
||||
SSLCipherSuite @SECLEVEL=1:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA
|
||||
SSLCipherSuite @SECLEVEL=0:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA
|
||||
SSLHonorCipherOrder on
|
||||
|
||||
# A self-signed (snakeoil) certificate can be created by installing
|
||||
|
|
|
|||
|
|
@ -135,7 +135,7 @@ expand-hosts
|
|||
# of valid alternatives, so we will give examples of each. Note that
|
||||
# IP addresses DO NOT have to be in the range given above, they just
|
||||
# need to be on the same network. The order of the parameters in these
|
||||
# do not matter, it's permissble to give name,adddress and MAC in any order
|
||||
# do not matter, it's permissble to give name,address and MAC in any order
|
||||
|
||||
# Always allocate the host with ethernet address 11:22:33:44:55:66
|
||||
# The IP address 192.168.0.60
|
||||
|
|
|
|||
|
|
@ -59,8 +59,8 @@ class ShellCmd(object):
|
|||
err = []
|
||||
err.append('failed to execute shell command: %s' % self.cmd)
|
||||
err.append('return code: %s' % self.process.returncode)
|
||||
err.append('stdout: %s' % self.stdout)
|
||||
err.append('stderr: %s' % self.stderr)
|
||||
err.append('stdout: %s' % self.stdout.decode())
|
||||
err.append('stderr: %s' % self.stderr.decode())
|
||||
raise Exception('\n'.join(err))
|
||||
|
||||
self.return_code = self.process.returncode
|
||||
|
|
|
|||
|
|
@ -21,8 +21,9 @@ import logging
|
|||
import os
|
||||
import re
|
||||
import sys
|
||||
import urllib
|
||||
import urllib2
|
||||
import urllib.request
|
||||
import urllib.parse
|
||||
import urllib.error
|
||||
import time
|
||||
import copy
|
||||
|
||||
|
|
@ -41,9 +42,12 @@ from cs.CsProcess import CsProcess
|
|||
from cs.CsStaticRoutes import CsStaticRoutes
|
||||
from cs.CsVpcGuestNetwork import CsVpcGuestNetwork
|
||||
|
||||
ICMPV6_TYPE_ANY = "{ destination-unreachable, packet-too-big, time-exceeded, parameter-problem, echo-request, echo-reply, mld-listener-query, mld-listener-report, mld-listener-done, nd-router-solicit, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert, nd-redirect, router-renumbering }"
|
||||
ICMPV6_TYPE_ANY = "{ destination-unreachable, packet-too-big, time-exceeded, parameter-problem, \
|
||||
echo-request, echo-reply, mld-listener-query, mld-listener-report, mld-listener-done, \
|
||||
nd-router-solicit, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert, nd-redirect, router-renumbering }"
|
||||
TCP_UDP_PORT_ANY = "{ 0-65535 }"
|
||||
|
||||
|
||||
def removeUndesiredCidrs(cidrs, version):
|
||||
version_char = ":"
|
||||
if version == 4:
|
||||
|
|
@ -61,15 +65,17 @@ def removeUndesiredCidrs(cidrs, version):
|
|||
return cidrs
|
||||
return None
|
||||
|
||||
|
||||
def appendStringIfNotEmpty(s1, s2):
|
||||
if s2:
|
||||
if type(s2) != str:
|
||||
if not isinstance(s2, str):
|
||||
s2 = str(s2)
|
||||
if s1:
|
||||
return s1 + " " + s2
|
||||
return s2
|
||||
return s1
|
||||
|
||||
|
||||
class CsPassword(CsDataBag):
|
||||
|
||||
TOKEN_FILE = "/tmp/passwdsrvrtoken"
|
||||
|
|
@ -107,10 +113,10 @@ class CsPassword(CsDataBag):
|
|||
if proc.find():
|
||||
url = "http://%s:8080/" % server_ip
|
||||
payload = {"ip": vm_ip, "password": password, "token": token}
|
||||
data = urllib.urlencode(payload)
|
||||
request = urllib2.Request(url, data=data, headers={"DomU_Request": "save_password"})
|
||||
data = urllib.parse.urlencode(payload).encode()
|
||||
request = urllib.request.Request(url, data=data, headers={"DomU_Request": "save_password"})
|
||||
try:
|
||||
resp = urllib2.urlopen(request, data)
|
||||
resp = urllib.request.urlopen(request, data)
|
||||
logging.debug("Update password server result: http:%s, content:%s" % (resp.code, resp.read()))
|
||||
except Exception as e:
|
||||
logging.error("Failed to update password server due to: %s" % e)
|
||||
|
|
@ -165,15 +171,15 @@ class CsAcl(CsDataBag):
|
|||
icmp_type = ''
|
||||
rule = self.rule
|
||||
icmp_type = "any"
|
||||
if "icmp_type" in self.rule.keys() and self.rule['icmp_type'] != -1:
|
||||
if "icmp_type" in list(self.rule.keys()) and self.rule['icmp_type'] != -1:
|
||||
icmp_type = self.rule['icmp_type']
|
||||
if "icmp_code" in self.rule.keys() and rule['icmp_code'] != -1:
|
||||
if "icmp_code" in list(self.rule.keys()) and rule['icmp_code'] != -1:
|
||||
icmp_type = "%s/%s" % (self.rule['icmp_type'], self.rule['icmp_code'])
|
||||
rnge = ''
|
||||
if "first_port" in self.rule.keys() and \
|
||||
if "first_port" in list(self.rule.keys()) and \
|
||||
self.rule['first_port'] == self.rule['last_port']:
|
||||
rnge = " --dport %s " % self.rule['first_port']
|
||||
if "first_port" in self.rule.keys() and \
|
||||
if "first_port" in list(self.rule.keys()) and \
|
||||
self.rule['first_port'] != self.rule['last_port']:
|
||||
rnge = " --dport %s:%s" % (rule['first_port'], rule['last_port'])
|
||||
|
||||
|
|
@ -278,14 +284,14 @@ class CsAcl(CsDataBag):
|
|||
self.device = obj['device']
|
||||
self.ip = obj['nic_ip']
|
||||
self.ip6_cidr = None
|
||||
if "nic_ip6_cidr" in obj.keys():
|
||||
if "nic_ip6_cidr" in list(obj.keys()):
|
||||
self.ip6_cidr = obj['nic_ip6_cidr']
|
||||
self.netmask = obj['nic_netmask']
|
||||
self.config = config
|
||||
self.cidr = "%s/%s" % (self.ip, self.netmask)
|
||||
if "ingress_rules" in obj.keys():
|
||||
if "ingress_rules" in list(obj.keys()):
|
||||
self.ingress = obj['ingress_rules']
|
||||
if "egress_rules" in obj.keys():
|
||||
if "egress_rules" in list(obj.keys()):
|
||||
self.egress = obj['egress_rules']
|
||||
self.fw = config.get_fw()
|
||||
self.ipv6_acl = config.get_ipv6_acl()
|
||||
|
|
@ -308,9 +314,9 @@ class CsAcl(CsDataBag):
|
|||
self.ipv6_acl.insert(0, {'type': "chain", 'chain': chain})
|
||||
for rule in rule_list:
|
||||
cidr = rule['cidr']
|
||||
if cidr != None and cidr != "":
|
||||
if cidr is not None and cidr != "":
|
||||
cidr = removeUndesiredCidrs(cidr, 4)
|
||||
if cidr == None or cidr == "":
|
||||
if cidr is None or cidr == "":
|
||||
continue
|
||||
addr = ""
|
||||
if cidr:
|
||||
|
|
@ -352,7 +358,7 @@ class CsAcl(CsDataBag):
|
|||
proto = "%s dport %s" % (proto, port)
|
||||
|
||||
action = "drop"
|
||||
if 'allowed' in rule.keys() and rule['allowed']:
|
||||
if 'allowed' in list(rule.keys()) and rule['allowed']:
|
||||
action = "accept"
|
||||
|
||||
rstr = addr
|
||||
|
|
@ -376,9 +382,9 @@ class CsAcl(CsDataBag):
|
|||
for i in rule_list:
|
||||
ruleData = copy.copy(i)
|
||||
cidr = ruleData['cidr']
|
||||
if cidr != None and cidr != "":
|
||||
if cidr is not None and cidr != "":
|
||||
cidr = removeUndesiredCidrs(cidr, 6)
|
||||
if cidr == None or cidr == "":
|
||||
if cidr is None or cidr == "":
|
||||
continue
|
||||
ruleData['cidr'] = cidr
|
||||
r = self.AclRule(direction, self, ruleData, self.config, count)
|
||||
|
|
@ -411,9 +417,9 @@ class CsAcl(CsDataBag):
|
|||
self.type = rule['type']
|
||||
self.icmp_type = "any"
|
||||
self.protocol = self.type
|
||||
if "icmp_type" in rule.keys() and rule['icmp_type'] != -1:
|
||||
if "icmp_type" in list(rule.keys()) and rule['icmp_type'] != -1:
|
||||
self.icmp_type = rule['icmp_type']
|
||||
if "icmp_code" in rule.keys() and rule['icmp_code'] != -1:
|
||||
if "icmp_code" in list(rule.keys()) and rule['icmp_code'] != -1:
|
||||
self.icmp_type = "%s/%s" % (self.icmp_type, rule['icmp_code'])
|
||||
if self.type == "protocol":
|
||||
if rule['protocol'] == 41:
|
||||
|
|
@ -421,11 +427,11 @@ class CsAcl(CsDataBag):
|
|||
self.protocol = rule['protocol']
|
||||
self.action = "DROP"
|
||||
self.dport = ""
|
||||
if 'allowed' in rule.keys() and rule['allowed']:
|
||||
if 'allowed' in list(rule.keys()) and rule['allowed']:
|
||||
self.action = "ACCEPT"
|
||||
if 'first_port' in rule.keys():
|
||||
if 'first_port' in list(rule.keys()):
|
||||
self.dport = "-m %s --dport %s" % (self.protocol, rule['first_port'])
|
||||
if 'last_port' in rule.keys() and self.dport and \
|
||||
if 'last_port' in list(rule.keys()) and self.dport and \
|
||||
rule['last_port'] != rule['first_port']:
|
||||
self.dport = "%s:%s" % (self.dport, rule['last_port'])
|
||||
|
||||
|
|
@ -488,7 +494,7 @@ class CsIpv6Firewall(CsDataBag):
|
|||
continue
|
||||
rule = self.dbag[item]
|
||||
|
||||
if chains_added == False:
|
||||
if chains_added is False:
|
||||
guest_cidr = rule['guest_ip6_cidr']
|
||||
parent_chain = "fw_forward"
|
||||
chain = "fw_chain_egress"
|
||||
|
|
@ -640,23 +646,26 @@ class CsVmMetadata(CsDataBag):
|
|||
fh = open(dest, "w")
|
||||
self.__exflock(fh)
|
||||
if data is not None:
|
||||
fh.write(data)
|
||||
if isinstance(data, str):
|
||||
fh.write(data)
|
||||
elif isinstance(data, bytes):
|
||||
fh.write(data.decode())
|
||||
else:
|
||||
fh.write("")
|
||||
self.__unflock(fh)
|
||||
fh.close()
|
||||
os.chmod(dest, 0644)
|
||||
os.chmod(dest, 0o644)
|
||||
|
||||
if folder == "metadata" or folder == "meta-data":
|
||||
try:
|
||||
os.makedirs(metamanifestdir, 0755)
|
||||
os.makedirs(metamanifestdir, 0o755)
|
||||
except OSError as e:
|
||||
# error 17 is already exists, we do it this way for concurrency
|
||||
if e.errno != 17:
|
||||
print "failed to make directories " + metamanifestdir + " due to :" + e.strerror
|
||||
print("failed to make directories " + metamanifestdir + " due to :" + e.strerror)
|
||||
sys.exit(1)
|
||||
if os.path.exists(metamanifest):
|
||||
fh = open(metamanifest, "r+a")
|
||||
fh = open(metamanifest, "a+")
|
||||
self.__exflock(fh)
|
||||
if file not in fh.read():
|
||||
fh.write(file + '\n')
|
||||
|
|
@ -670,17 +679,17 @@ class CsVmMetadata(CsDataBag):
|
|||
fh.close()
|
||||
|
||||
if os.path.exists(metamanifest):
|
||||
os.chmod(metamanifest, 0644)
|
||||
os.chmod(metamanifest, 0o644)
|
||||
|
||||
def __htaccess(self, ip, folder, file):
|
||||
entry = "RewriteRule ^" + file + "$ ../" + folder + "/%{REMOTE_ADDR}/" + file + " [L,NC,QSA]"
|
||||
htaccessFolder = "/var/www/html/latest"
|
||||
htaccessFile = htaccessFolder + "/.htaccess"
|
||||
|
||||
CsHelper.mkdir(htaccessFolder, 0755, True)
|
||||
CsHelper.mkdir(htaccessFolder, 0o755, True)
|
||||
|
||||
if os.path.exists(htaccessFile):
|
||||
fh = open(htaccessFile, "r+a")
|
||||
fh = open(htaccessFile, "a+")
|
||||
self.__exflock(fh)
|
||||
if entry not in fh.read():
|
||||
fh.write(entry + '\n')
|
||||
|
|
@ -699,11 +708,11 @@ class CsVmMetadata(CsDataBag):
|
|||
htaccessFile = htaccessFolder+"/.htaccess"
|
||||
|
||||
try:
|
||||
os.makedirs(htaccessFolder, 0755)
|
||||
os.makedirs(htaccessFolder, 0o755)
|
||||
except OSError as e:
|
||||
# error 17 is already exists, we do it this way for sake of concurrency
|
||||
if e.errno != 17:
|
||||
print "failed to make directories " + htaccessFolder + " due to :" + e.strerror
|
||||
print("failed to make directories " + htaccessFolder + " due to :" + e.strerror)
|
||||
sys.exit(1)
|
||||
|
||||
fh = open(htaccessFile, "w")
|
||||
|
|
@ -717,7 +726,7 @@ class CsVmMetadata(CsDataBag):
|
|||
htaccessFolder = "/var/www/html/latest"
|
||||
htaccessFile = htaccessFolder + "/.htaccess"
|
||||
|
||||
fh = open(htaccessFile, "r+a")
|
||||
fh = open(htaccessFile, "a+")
|
||||
self.__exflock(fh)
|
||||
if entry not in fh.read():
|
||||
fh.write(entry + '\n')
|
||||
|
|
@ -734,7 +743,7 @@ class CsVmMetadata(CsDataBag):
|
|||
try:
|
||||
flock(file, LOCK_EX)
|
||||
except IOError as e:
|
||||
print "failed to lock file" + file.name + " due to : " + e.strerror
|
||||
print("failed to lock file" + file.name + " due to : " + e.strerror)
|
||||
sys.exit(1) # FIXME
|
||||
return True
|
||||
|
||||
|
|
@ -742,7 +751,7 @@ class CsVmMetadata(CsDataBag):
|
|||
try:
|
||||
flock(file, LOCK_UN)
|
||||
except IOError as e:
|
||||
print "failed to unlock file" + file.name + " due to : " + e.strerror
|
||||
print("failed to unlock file" + file.name + " due to : " + e.strerror)
|
||||
sys.exit(1) # FIXME
|
||||
return True
|
||||
|
||||
|
|
@ -838,9 +847,9 @@ class CsSite2SiteVpn(CsDataBag):
|
|||
file.addeq(" authby=secret")
|
||||
file.addeq(" keyexchange=%s" % ikeversion)
|
||||
file.addeq(" ike=%s" % ikepolicy)
|
||||
file.addeq(" ikelifetime=%s" % self.convert_sec_to_h(obj['ike_lifetime']))
|
||||
file.addeq(" ikelifetime=%s" % self.convert_sec_to_min(obj['ike_lifetime']))
|
||||
file.addeq(" esp=%s" % esppolicy)
|
||||
file.addeq(" lifetime=%s" % self.convert_sec_to_h(obj['esp_lifetime']))
|
||||
file.addeq(" lifetime=%s" % self.convert_sec_to_min(obj['esp_lifetime']))
|
||||
file.addeq(" keyingtries=2")
|
||||
file.addeq(" auto=route")
|
||||
if 'encap' not in obj:
|
||||
|
|
@ -868,9 +877,9 @@ class CsSite2SiteVpn(CsDataBag):
|
|||
|
||||
# This will load the new config
|
||||
CsHelper.execute("ipsec reload")
|
||||
os.chmod(vpnsecretsfile, 0400)
|
||||
os.chmod(vpnsecretsfile, 0o400)
|
||||
|
||||
for i in xrange(3):
|
||||
for i in range(3):
|
||||
done = True
|
||||
for peeridx in range(0, len(peerlistarr)):
|
||||
# Check for the proper connection and subnet
|
||||
|
|
@ -891,9 +900,9 @@ class CsSite2SiteVpn(CsDataBag):
|
|||
ipinsubnet = '.'.join(octets)
|
||||
CsHelper.execute("timeout 5 ping -c 3 %s" % ipinsubnet)
|
||||
|
||||
def convert_sec_to_h(self, val):
|
||||
hrs = int(val) / 3600
|
||||
return "%sh" % hrs
|
||||
def convert_sec_to_min(self, val):
|
||||
mins = int(val / 60)
|
||||
return "%sm" % mins
|
||||
|
||||
|
||||
class CsVpnUser(CsDataBag):
|
||||
|
|
@ -1383,7 +1392,7 @@ def main(argv):
|
|||
databag_map.pop("guest_network")
|
||||
|
||||
def execDatabag(key, db):
|
||||
if key not in db.keys() or 'executor' not in db[key]:
|
||||
if key not in list(db.keys()) or 'executor' not in db[key]:
|
||||
logging.warn("Unable to find config or executor(s) for the databag type %s" % key)
|
||||
return
|
||||
for executor in db[key]['executor']:
|
||||
|
|
@ -1397,10 +1406,10 @@ def main(argv):
|
|||
|
||||
if json_type == "cmd_line":
|
||||
logging.debug("cmd_line.json changed. All other files will be processed as well.")
|
||||
for key in databag_map.keys():
|
||||
for key in list(databag_map.keys()):
|
||||
execDatabag(key, databag_map)
|
||||
execIptables(config)
|
||||
elif json_type in databag_map.keys():
|
||||
elif json_type in list(databag_map.keys()):
|
||||
execDatabag(json_type, databag_map)
|
||||
if databag_map[json_type]['process_iptables']:
|
||||
execIptables(config)
|
||||
|
|
@ -1411,5 +1420,6 @@ def main(argv):
|
|||
red.set()
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main(sys.argv)
|
||||
|
|
|
|||
|
|
@ -19,11 +19,11 @@ import logging
|
|||
from netaddr import IPAddress, IPNetwork
|
||||
import subprocess
|
||||
import time
|
||||
import CsHelper
|
||||
from CsDatabag import CsDataBag
|
||||
from CsApp import CsApache, CsDnsmasq, CsPasswdSvc
|
||||
from CsRoute import CsRoute
|
||||
from CsRule import CsRule
|
||||
from . import CsHelper
|
||||
from .CsDatabag import CsDataBag
|
||||
from .CsApp import CsApache, CsDnsmasq, CsPasswdSvc
|
||||
from .CsRoute import CsRoute
|
||||
from .CsRule import CsRule
|
||||
|
||||
VRRP_TYPES = ['guest']
|
||||
|
||||
|
|
@ -321,7 +321,7 @@ class CsIP:
|
|||
logging.info("Configuring address %s on device %s", self.ip(), self.dev)
|
||||
cmd = "ip addr add dev %s %s brd +" % (self.dev, self.ip())
|
||||
CsHelper.execute(cmd)
|
||||
cmd = "ifconfig %s mtu %s" % (self.dev, self.mtu())
|
||||
cmd = "ifconfig %s mtu %s" % (self.dev, self.mtu())
|
||||
CsHelper.execute(cmd)
|
||||
except Exception as e:
|
||||
logging.info("Exception occurred ==> %s" % e)
|
||||
|
|
@ -364,7 +364,7 @@ class CsIP:
|
|||
else:
|
||||
# once we start processing public ip's we need to verify there
|
||||
# is a default route and add if needed
|
||||
if(self.cl.get_gateway()):
|
||||
if self.cl.get_gateway():
|
||||
route.add_defaultroute(self.cl.get_gateway())
|
||||
|
||||
if self.config.is_router() and self.cl.get_ip6gateway():
|
||||
|
|
@ -556,7 +556,7 @@ class CsIP:
|
|||
"-A POSTROUTING -o %s -j SNAT --to-source %s" %
|
||||
(self.dev, self.address['public_ip'])])
|
||||
if self.get_gateway() == self.get_ip_address():
|
||||
for inf, addresses in self.config.address().dbag.iteritems():
|
||||
for inf, addresses in self.config.address().dbag.items():
|
||||
if not inf.startswith("eth"):
|
||||
continue
|
||||
for address in addresses:
|
||||
|
|
@ -625,7 +625,7 @@ class CsIP:
|
|||
if self.config.is_vpc():
|
||||
if self.get_type() in ["public"] and "gateway" in self.address and self.address["gateway"] and self.address["gateway"] != "None":
|
||||
route.add_route(self.dev, self.address["gateway"])
|
||||
for inf, addresses in self.config.address().dbag.iteritems():
|
||||
for inf, addresses in self.config.address().dbag.items():
|
||||
if not inf.startswith("eth"):
|
||||
continue
|
||||
for address in addresses:
|
||||
|
|
@ -709,7 +709,7 @@ class CsIP:
|
|||
self.iplist[cidr] = self.dev
|
||||
|
||||
def configured(self):
|
||||
if self.address['cidr'] in self.iplist.keys():
|
||||
if self.address['cidr'] in list(self.iplist.keys()):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
|
@ -738,7 +738,7 @@ class CsIP:
|
|||
return self.dev
|
||||
|
||||
def hasIP(self, ip):
|
||||
return ip in self.address.values()
|
||||
return ip in list(self.address.values())
|
||||
|
||||
def arpPing(self):
|
||||
cmd = "arping -c 1 -I %s -A -U -s %s %s" % (
|
||||
|
|
@ -749,7 +749,7 @@ class CsIP:
|
|||
|
||||
# Delete any ips that are configured but not in the bag
|
||||
def compare(self, bag):
|
||||
if len(self.iplist) > 0 and (self.dev not in bag.keys() or len(bag[self.dev]) == 0):
|
||||
if len(self.iplist) > 0 and (self.dev not in list(bag.keys()) or len(bag[self.dev]) == 0):
|
||||
# Remove all IPs on this device
|
||||
logging.info(
|
||||
"Will remove all configured addresses on device %s", self.dev)
|
||||
|
|
@ -760,13 +760,13 @@ class CsIP:
|
|||
# This condition should not really happen but did :)
|
||||
# It means an apache file got orphaned after a guest network address
|
||||
# was deleted
|
||||
if len(self.iplist) == 0 and (self.dev not in bag.keys() or len(bag[self.dev]) == 0):
|
||||
if len(self.iplist) == 0 and (self.dev not in list(bag.keys()) or len(bag[self.dev]) == 0):
|
||||
app = CsApache(self)
|
||||
app.remove()
|
||||
|
||||
for ip in self.iplist:
|
||||
found = False
|
||||
if self.dev in bag.keys():
|
||||
if self.dev in list(bag.keys()):
|
||||
for address in bag[self.dev]:
|
||||
self.setAddress(address)
|
||||
if (self.hasIP(ip) or self.is_guest_gateway(address, ip)) and address["add"]:
|
||||
|
|
@ -799,7 +799,7 @@ class CsIP:
|
|||
remove = []
|
||||
if ip == "all":
|
||||
logging.info("Removing addresses from device %s", self.dev)
|
||||
remove = self.iplist.keys()
|
||||
remove = list(self.iplist.keys())
|
||||
else:
|
||||
remove.append(ip)
|
||||
for ip in remove:
|
||||
|
|
|
|||
|
|
@ -16,8 +16,8 @@
|
|||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
import os
|
||||
from CsFile import CsFile
|
||||
import CsHelper
|
||||
from .CsFile import CsFile
|
||||
from . import CsHelper
|
||||
|
||||
|
||||
class CsApp:
|
||||
|
|
|
|||
|
|
@ -16,8 +16,8 @@
|
|||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from CsDatabag import CsCmdLine, CsGuestNetwork
|
||||
from CsAddress import CsAddress
|
||||
from .CsDatabag import CsCmdLine, CsGuestNetwork
|
||||
from .CsAddress import CsAddress
|
||||
import logging
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -33,7 +33,7 @@ class CsDataBag(object):
|
|||
self.config = config
|
||||
|
||||
def dump(self):
|
||||
print self.dbag
|
||||
print(self.dbag)
|
||||
|
||||
def get_bag(self):
|
||||
return self.dbag
|
||||
|
|
@ -151,7 +151,7 @@ class CsCmdLine(CsDataBag):
|
|||
else:
|
||||
passwd = "%s-%s" % (self.get_vpccidr(), self.get_router_id())
|
||||
md5 = hashlib.md5()
|
||||
md5.update(passwd)
|
||||
md5.update(passwd.encode())
|
||||
return md5.hexdigest()
|
||||
|
||||
def get_gateway(self):
|
||||
|
|
@ -191,7 +191,7 @@ class CsGuestNetwork(CsDataBag):
|
|||
""" Get guestnetwork config parameters """
|
||||
|
||||
def get_dev_data(self, devname):
|
||||
if devname in self.dbag and type(self.dbag[devname]) == list and len(self.dbag[devname]) > 0:
|
||||
if devname in self.dbag and isinstance(self.dbag[devname], list) and len(self.dbag[devname]) > 0:
|
||||
return self.dbag[devname][0]
|
||||
return {}
|
||||
|
||||
|
|
@ -223,7 +223,7 @@ class CsGuestNetwork(CsDataBag):
|
|||
if devname:
|
||||
return self.__get_device_router_ip6prelen(devname)
|
||||
else:
|
||||
for key in self.dbag.keys():
|
||||
for key in list(self.dbag.keys()):
|
||||
ip6prelen = self.__get_device_router_ip6prelen(key)
|
||||
if ip6prelen:
|
||||
return ip6prelen
|
||||
|
|
@ -240,7 +240,7 @@ class CsGuestNetwork(CsDataBag):
|
|||
if devname:
|
||||
return self.__get_device_router_ip6gateway(devname)
|
||||
else:
|
||||
for key in self.dbag.keys():
|
||||
for key in list(self.dbag.keys()):
|
||||
ip6gateway = self.__get_device_router_ip6gateway(key)
|
||||
if ip6gateway:
|
||||
return ip6gateway
|
||||
|
|
|
|||
|
|
@ -14,13 +14,13 @@
|
|||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
import CsHelper
|
||||
from . import CsHelper
|
||||
import logging
|
||||
import os
|
||||
from netaddr import *
|
||||
from random import randint
|
||||
import json
|
||||
from CsGuestNetwork import CsGuestNetwork
|
||||
from .CsGuestNetwork import CsGuestNetwork
|
||||
from cs.CsDatabag import CsDataBag
|
||||
from cs.CsFile import CsFile
|
||||
from cs.CsAddress import CsIP
|
||||
|
|
|
|||
|
|
@ -70,7 +70,7 @@ class CsFile:
|
|||
|
||||
def dump(self):
|
||||
for line in self.new_config:
|
||||
print line
|
||||
print(line)
|
||||
|
||||
def addeq(self, string):
|
||||
""" Update a line in a file of the form token=something
|
||||
|
|
@ -153,7 +153,7 @@ class CsFile:
|
|||
logging.debug("Searching for %s string " % search)
|
||||
|
||||
for index, line in enumerate(self.new_config):
|
||||
print ' line = ' + line
|
||||
print(' line = ' + line)
|
||||
if line.lstrip().startswith(ignoreLinesStartWith):
|
||||
continue
|
||||
if search in line:
|
||||
|
|
|
|||
|
|
@ -15,7 +15,7 @@
|
|||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
from merge import DataBag
|
||||
import CsHelper
|
||||
from . import CsHelper
|
||||
|
||||
|
||||
class CsGuestNetwork:
|
||||
|
|
@ -27,7 +27,7 @@ class CsGuestNetwork:
|
|||
db.load()
|
||||
dbag = db.getDataBag()
|
||||
self.config = config
|
||||
if device in dbag.keys() and len(dbag[device]) != 0:
|
||||
if device in list(dbag.keys()) and len(dbag[device]) != 0:
|
||||
self.data = dbag[device][0]
|
||||
else:
|
||||
self.guest = False
|
||||
|
|
|
|||
|
|
@ -87,7 +87,7 @@ def mkdir(name, mode, fatal):
|
|||
except OSError as e:
|
||||
if e.errno != 17:
|
||||
print("failed to make directories " + name + " due to :" + e.strerror)
|
||||
if(fatal):
|
||||
if fatal:
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
|
|
@ -115,8 +115,8 @@ def get_device_info():
|
|||
list = []
|
||||
for i in execute("ip addr show |grep -v secondary"):
|
||||
vals = i.strip().lstrip().rstrip().split()
|
||||
if re.search('[0-9]:',vals[0]):
|
||||
to={}
|
||||
if re.search('[0-9]:', vals[0]):
|
||||
to = {}
|
||||
to['mtu'] = vals[4]
|
||||
list.append(to)
|
||||
|
||||
|
|
@ -124,7 +124,7 @@ def get_device_info():
|
|||
if len(list) > 0:
|
||||
to = list.pop(len(list)-1)
|
||||
else:
|
||||
to={}
|
||||
to = {}
|
||||
to['ip'] = vals[1]
|
||||
to['dev'] = vals[-1]
|
||||
to['network'] = IPNetwork(to['ip'])
|
||||
|
|
@ -198,7 +198,7 @@ def execute(command):
|
|||
returncode = 0
|
||||
|
||||
logging.debug("Command [%s] has the result [%s]" % (command, result))
|
||||
return result.splitlines()
|
||||
return result.decode().splitlines()
|
||||
except subprocess.CalledProcessError as e:
|
||||
logging.error(e)
|
||||
returncode = e.returncode
|
||||
|
|
|
|||
|
|
@ -18,9 +18,9 @@ import logging
|
|||
import os.path
|
||||
import re
|
||||
from cs.CsDatabag import CsDataBag
|
||||
from CsProcess import CsProcess
|
||||
from CsFile import CsFile
|
||||
import CsHelper
|
||||
from .CsProcess import CsProcess
|
||||
from .CsFile import CsFile
|
||||
from . import CsHelper
|
||||
|
||||
HAPROXY_CONF_T = "/etc/haproxy/haproxy.cfg.new"
|
||||
HAPROXY_CONF_P = "/etc/haproxy/haproxy.cfg"
|
||||
|
|
@ -30,9 +30,9 @@ class CsLoadBalancer(CsDataBag):
|
|||
""" Manage Load Balancer entries """
|
||||
|
||||
def process(self):
|
||||
if "config" not in self.dbag.keys():
|
||||
if "config" not in list(self.dbag.keys()):
|
||||
return
|
||||
if 'configuration' not in self.dbag['config'][0].keys():
|
||||
if 'configuration' not in list(self.dbag['config'][0].keys()):
|
||||
return
|
||||
config = self.dbag['config'][0]['configuration']
|
||||
file1 = CsFile(HAPROXY_CONF_T)
|
||||
|
|
|
|||
|
|
@ -16,7 +16,7 @@
|
|||
# under the License.
|
||||
import logging
|
||||
from cs.CsDatabag import CsDataBag
|
||||
from CsFile import CsFile
|
||||
from .CsFile import CsFile
|
||||
import json
|
||||
|
||||
MON_CONFIG = "/etc/monitor.conf"
|
||||
|
|
|
|||
|
|
@ -15,8 +15,8 @@
|
|||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
import CsHelper
|
||||
from CsDatabag import CsCmdLine
|
||||
from . import CsHelper
|
||||
from .CsDatabag import CsCmdLine
|
||||
import logging
|
||||
|
||||
|
||||
|
|
@ -28,7 +28,7 @@ class CsChain(object):
|
|||
self.count = {}
|
||||
|
||||
def add(self, table, chain):
|
||||
if table not in self.chain.keys():
|
||||
if table not in list(self.chain.keys()):
|
||||
self.chain.setdefault(table, []).append(chain)
|
||||
else:
|
||||
self.chain[table].append(chain)
|
||||
|
|
@ -40,7 +40,7 @@ class CsChain(object):
|
|||
self.count[chain] += 1
|
||||
|
||||
def get(self, table):
|
||||
if table not in self.chain.keys():
|
||||
if table not in list(self.chain.keys()):
|
||||
return {}
|
||||
return self.chain[table]
|
||||
|
||||
|
|
@ -51,7 +51,7 @@ class CsChain(object):
|
|||
return self.last_added
|
||||
|
||||
def has_chain(self, table, chain):
|
||||
if table not in self.chain.keys():
|
||||
if table not in list(self.chain.keys()):
|
||||
return False
|
||||
if chain not in self.chain[table]:
|
||||
return False
|
||||
|
|
@ -179,7 +179,7 @@ class CsNetfilters(object):
|
|||
# For now raising the log.
|
||||
# TODO: Need to fix in the framework.
|
||||
if ret.returncode != 0:
|
||||
error = ret.communicate()[0]
|
||||
error = ret.communicate()[0].decode()
|
||||
logging.debug("iptables command got failed ... continuing")
|
||||
ruleSet.add(tupledFw)
|
||||
self.chain.add_rule(rule_chain)
|
||||
|
|
@ -223,14 +223,15 @@ class CsNetfilters(object):
|
|||
self.rules[:] = [x for x in self.rules if not x == rule]
|
||||
|
||||
def add_ip6_chain(self, address_family, table, chain, hook, action):
|
||||
chain_policy = ""
|
||||
if hook:
|
||||
chain_policy = "type filter hook %s priority 0;" % hook
|
||||
if chain_policy and action:
|
||||
chain_policy = "%s policy %s;" % (chain_policy, action)
|
||||
CsHelper.execute("nft add chain %s %s %s '{ %s }'" % (address_family, table, chain, chain_policy))
|
||||
if hook == "input" or hook == "output":
|
||||
CsHelper.execute("nft add rule %s %s %s icmpv6 type { echo-request, echo-reply, nd-neighbor-solicit, nd-router-advert, nd-neighbor-advert } accept" % (address_family, table, chain))
|
||||
chain_policy = ""
|
||||
if hook:
|
||||
chain_policy = "type filter hook %s priority 0;" % hook
|
||||
if chain_policy and action:
|
||||
chain_policy = "%s policy %s;" % (chain_policy, action)
|
||||
CsHelper.execute("nft add chain %s %s %s '{ %s }'" % (address_family, table, chain, chain_policy))
|
||||
if hook == "input" or hook == "output":
|
||||
CsHelper.execute("nft add rule %s %s %s icmpv6 type { echo-request, echo-reply, \
|
||||
nd-neighbor-solicit, nd-router-advert, nd-neighbor-advert } accept" % (address_family, table, chain))
|
||||
|
||||
def apply_ip6_rules(self, rules, type):
|
||||
if len(rules) == 0:
|
||||
|
|
@ -238,14 +239,14 @@ class CsNetfilters(object):
|
|||
address_family = 'ip6'
|
||||
table = 'ip6_firewall'
|
||||
default_chains = [
|
||||
{ "chain": "fw_input", "hook": "input", "action": "drop"},
|
||||
{ "chain": "fw_forward", "hook": "forward", "action": "accept"}
|
||||
{"chain": "fw_input", "hook": "input", "action": "drop"},
|
||||
{"chain": "fw_forward", "hook": "forward", "action": "accept"}
|
||||
]
|
||||
if type == "acl":
|
||||
table = 'ip6_acl'
|
||||
default_chains = [
|
||||
{ "chain": "acl_input", "hook": "input", "action": "drop" },
|
||||
{ "chain": "acl_forward", "hook": "forward", "action": "accept"}
|
||||
{"chain": "acl_input", "hook": "input", "action": "drop"},
|
||||
{"chain": "acl_forward", "hook": "forward", "action": "accept"}
|
||||
]
|
||||
CsHelper.execute("nft add table %s %s" % (address_family, table))
|
||||
for chain in default_chains:
|
||||
|
|
@ -287,7 +288,7 @@ class CsNetfilter(object):
|
|||
self.seen = True
|
||||
|
||||
def __convert_to_dict(self, rule):
|
||||
rule = unicode(rule.lstrip())
|
||||
rule = str(rule.lstrip())
|
||||
rule = rule.replace('! -', '!_-')
|
||||
rule = rule.replace('-p all', '')
|
||||
rule = rule.replace(' ', ' ')
|
||||
|
|
@ -298,8 +299,8 @@ class CsNetfilter(object):
|
|||
rule = rule.replace('-m state', '-m2 state')
|
||||
rule = rule.replace('ESTABLISHED,RELATED', 'RELATED,ESTABLISHED')
|
||||
bits = rule.split(' ')
|
||||
rule = dict(zip(bits[0::2], bits[1::2]))
|
||||
if "-A" in rule.keys():
|
||||
rule = dict(list(zip(bits[0::2], bits[1::2])))
|
||||
if "-A" in list(rule.keys()):
|
||||
self.chain = rule["-A"]
|
||||
return rule
|
||||
|
||||
|
|
@ -334,7 +335,7 @@ class CsNetfilter(object):
|
|||
'--to-source', '--to-destination', '--mark']
|
||||
str = ''
|
||||
for k in order:
|
||||
if k in self.rule.keys():
|
||||
if k in list(self.rule.keys()):
|
||||
printable = k.replace('-m2', '-m')
|
||||
printable = printable.replace('!_-', '! -')
|
||||
if delete:
|
||||
|
|
@ -351,7 +352,7 @@ class CsNetfilter(object):
|
|||
return False
|
||||
if rule.get_chain() != self.get_chain():
|
||||
return False
|
||||
if len(rule.get_rule().items()) != len(self.get_rule().items()):
|
||||
if len(list(rule.get_rule().items())) != len(list(self.get_rule().items())):
|
||||
return False
|
||||
common = set(rule.get_rule().items()) & set(self.get_rule().items())
|
||||
if len(common) != len(rule.get_rule()):
|
||||
|
|
|
|||
|
|
@ -17,7 +17,7 @@
|
|||
# under the License.
|
||||
import os
|
||||
import re
|
||||
import CsHelper
|
||||
from . import CsHelper
|
||||
import logging
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -32,13 +32,13 @@
|
|||
# -------------------------------------------------------------------- #
|
||||
import os
|
||||
import logging
|
||||
import CsHelper
|
||||
from CsFile import CsFile
|
||||
from CsProcess import CsProcess
|
||||
from CsApp import CsPasswdSvc
|
||||
from CsAddress import CsDevice
|
||||
from CsRoute import CsRoute
|
||||
from CsStaticRoutes import CsStaticRoutes
|
||||
from . import CsHelper
|
||||
from .CsFile import CsFile
|
||||
from .CsProcess import CsProcess
|
||||
from .CsApp import CsPasswdSvc
|
||||
from .CsAddress import CsDevice
|
||||
from .CsRoute import CsRoute
|
||||
from .CsStaticRoutes import CsStaticRoutes
|
||||
import socket
|
||||
from time import sleep
|
||||
|
||||
|
|
@ -435,7 +435,7 @@ class CsRedundant(object):
|
|||
- public IPv6 for primary VR public NIC as its IPv6 gets lost on link down
|
||||
"""
|
||||
dev = ''
|
||||
if dev == interface.get_device() or not ipv6 :
|
||||
if dev == interface.get_device() or not ipv6:
|
||||
return
|
||||
dev = interface.get_device()
|
||||
command = "ip -6 address show %s | grep 'inet6 %s'" % (dev, ipv6)
|
||||
|
|
@ -458,7 +458,7 @@ class CsRedundant(object):
|
|||
- guest IPv6 gateway for primary VR guest NIC
|
||||
"""
|
||||
dev = ''
|
||||
if dev == interface.get_device() or not ipv6 :
|
||||
if dev == interface.get_device() or not ipv6:
|
||||
return
|
||||
dev = interface.get_device()
|
||||
command = "ip -6 address show %s | grep 'inet6 %s'" % (dev, ipv6)
|
||||
|
|
@ -495,7 +495,6 @@ class CsRedundant(object):
|
|||
CsHelper.service("radvd", "disable")
|
||||
logging.info(CsHelper.execute("systemctl status radvd"))
|
||||
|
||||
|
||||
def _add_ipv6_guest_gateway(self):
|
||||
"""
|
||||
Configure guest network gateway as IPv6 address for guest interface
|
||||
|
|
|
|||
|
|
@@ -15,7 +15,7 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import CsHelper
from . import CsHelper
import logging

@@ -15,7 +15,7 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import CsHelper
from . import CsHelper
import logging

@@ -18,8 +18,8 @@
# under the License.

import logging
import CsHelper
from CsDatabag import CsDataBag
from . import CsHelper
from .CsDatabag import CsDataBag


class CsStaticRoutes(CsDataBag):
@@ -17,14 +17,15 @@
import logging
import os.path
from cs.CsDatabag import CsDataBag
from CsFile import CsFile
import CsHelper
from .CsFile import CsFile
from . import CsHelper

VPC_PUBLIC_INTERFACE = "eth1"

RADVD_CONF = "/etc/radvd.conf"
RADVD_CONF_NEW = "/etc/radvd.conf.new"


class CsVpcGuestNetwork(CsDataBag):
""" Manage Vpc Guest Networks """

@@ -53,13 +54,13 @@ class CsVpcGuestNetwork(CsDataBag):
CsHelper.execute("sysctl net.ipv6.conf." + device + ".use_tempaddr=0")

def add_address_route(self, entry):
if 'router_guest_ip6' in entry.keys() and entry['router_guest_ip6']:
if 'router_guest_ip6' in list(entry.keys()) and entry['router_guest_ip6']:
self.enable_ipv6(entry['device'])
cidr_size = entry['router_guest_ip6_cidr'].split("/")[-1]
full_addr = entry['router_guest_ip6_gateway'] + "/" + cidr_size
if not CsHelper.execute("ip -6 addr show dev %s | grep -w %s" % (entry['device'], full_addr)):
CsHelper.execute("ip -6 addr add %s dev %s" % (full_addr, entry['device']))
if 'router_ip6' in entry.keys() and entry['router_ip6']:
if 'router_ip6' in list(entry.keys()) and entry['router_ip6']:
self.__disable_dad(VPC_PUBLIC_INTERFACE)
full_public_addr = entry['router_ip6'] + "/" + cidr_size
if not CsHelper.execute("ip -6 addr show dev %s | grep -w %s" % (VPC_PUBLIC_INTERFACE, full_public_addr)):

@@ -70,11 +71,11 @@ class CsVpcGuestNetwork(CsDataBag):
return

def remove_address_route(self, entry):
if 'router_guest_ip6' in entry.keys() and entry['router_guest_ip6']:
if 'router_guest_ip6' in list(entry.keys()) and entry['router_guest_ip6']:
cidr_size = entry['router_guest_ip6_cidr'].split("/")[-1]
full_addr = entry['router_guest_ip6_gateway'] + "/" + cidr_size
CsHelper.execute("ip -6 addr del %s dev %s" % (full_addr, entry['device']))
if 'router_ip6' in entry.keys() and entry['router_ip6']:
if 'router_ip6' in list(entry.keys()) and entry['router_ip6']:
full_public_addr = entry['router_ip6'] + "/" + cidr_size
CsHelper.execute("ip -6 addr del %s dev %s" % (full_public_addr, VPC_PUBLIC_INTERFACE))
else:

@@ -94,7 +95,7 @@ class CsVpcGuestNetwork(CsDataBag):
self.__disable_dad(device)

def add_radvd_conf(self, entry):
if 'router_guest_ip6' in entry.keys() and entry['router_guest_ip6']:
if 'router_guest_ip6' in list(entry.keys()) and entry['router_guest_ip6']:
cidr_size = entry['router_guest_ip6_cidr'].split("/")[-1]
full_addr = entry['router_guest_ip6_gateway'] + "/" + cidr_size
self.conf.append("interface %s" % entry['device'])

@@ -107,7 +108,7 @@ class CsVpcGuestNetwork(CsDataBag):
self.conf.append(" AdvOnLink on;")
self.conf.append(" AdvAutonomous on;")
self.conf.append(" };")
if 'dns6' in entry.keys() and entry['dns6']:
if 'dns6' in list(entry.keys()) and entry['dns6']:
for dns in entry['dns6'].split(","):
self.conf.append(" RDNSS %s" % dns)
self.conf.append(" {")
@@ -24,16 +24,16 @@ def merge(dbag, data):
# This seems desirable ....
if "add" in data and data['add'] is False and "ipv4_address" in data:
if data['ipv4_address'] in dbag:
del(dbag[data['ipv4_address']])
del dbag[data['ipv4_address']]
else:
remove_keys = set()
for key, entry in dbag.iteritems():
for key, entry in dbag.items():
if key != 'id' and entry['mac_address'] == data['mac_address']:
remove_keys.add(key)
break

for remove_key in remove_keys:
del(dbag[remove_key])
del dbag[remove_key]

dbag[data['ipv4_address']] = data

@@ -25,8 +25,8 @@ def merge(dbag, data):
for rule in data['rules']:
id = str(rule['id'])
if rule['revoked']:
if id in dbagc.keys():
del(dbagc[id])
elif id not in dbagc.keys():
if id in list(dbagc.keys()):
del dbagc[id]
elif id not in list(dbagc.keys()):
dbagc[id] = rule
return dbagc

@@ -39,7 +39,7 @@ def merge(dbag, rules):
dbag[source_ip] = [newrule]
elif rules["type"] == "forwardrules":
index = -1
if source_ip in dbag.keys():
if source_ip in list(dbag.keys()):
for forward in dbag[source_ip]:
if ruleCompare(forward, newrule):
index = dbag[source_ip].index(forward)

@@ -51,15 +51,15 @@ def merge(dbag, rules):
dbag[source_ip] = [newrule]
else:
if rules["type"] == "staticnatrules":
if source_ip in dbag.keys():
if source_ip in list(dbag.keys()):
del dbag[source_ip]
elif rules["type"] == "forwardrules":
if source_ip in dbag.keys():
if source_ip in list(dbag.keys()):
index = -1
for forward in dbag[source_ip]:
if ruleCompare(forward, newrule):
index = dbag[source_ip].index(forward)
print "removing index %s" % str(index)
print("removing index %s" % str(index))
if not index == -1:
del dbag[source_ip][index]
@@ -28,11 +28,11 @@ def merge(dbag, gn):
device_to_die = dbag[device][0]
try:
dbag[device].remove(device_to_die)
except ValueError, e:
print "[WARN] cs_guestnetwork.py :: Error occurred removing item from databag. => %s" % device_to_die
del(dbag[device])
except ValueError as e:
print("[WARN] cs_guestnetwork.py :: Error occurred removing item from databag. => %s" % device_to_die)
del dbag[device]
else:
del(dbag[device])
del dbag[device]

else:
dbag.setdefault(device, []).append(gn)

@@ -57,7 +57,7 @@ def merge(dbag, ip):
ip['network'] = str(ipo.network) + '/' + str(ipo.prefixlen)
if 'mtu' in ip:
ip['mtu'] = str(ip['mtu'])
if 'nw_type' not in ip.keys():
if 'nw_type' not in list(ip.keys()):
ip['nw_type'] = 'public'
else:
ip['nw_type'] = ip['nw_type'].lower()

@@ -20,8 +20,8 @@
def merge(dbag, vpn):
key = vpn['vpn_server_ip']
op = vpn['create']
if key in dbag.keys() and not op:
del(dbag[key])
if key in list(dbag.keys()) and not op:
del dbag[key]
else:
dbag[key] = vpn
return dbag

@@ -20,8 +20,8 @@
def merge(dbag, vpn):
key = vpn['peer_gateway_ip']
op = vpn['create']
if key in dbag.keys() and not op:
del(dbag[key])
if key in list(dbag.keys()) and not op:
del dbag[key]
else:
dbag[key] = vpn
return dbag

@@ -22,26 +22,26 @@ import copy
def merge(dbag, data):
dbagc = copy.deepcopy(dbag)

print dbag
print data
print(dbag)
print(data)
if "vpn_users" not in data:
return dbagc

# remove previously deleted user from the dict
for user in dbagc.keys():
for user in list(dbagc.keys()):
if user == 'id':
continue
userrec = dbagc[user]
add = userrec['add']
if not add:
del(dbagc[user])
del dbagc[user]

for user in data['vpn_users']:
username = user['user']
add = user['add']
if username not in dbagc.keys():
if username not in list(dbagc.keys()):
dbagc[username] = user
elif username in dbagc.keys() and not add:
elif username in list(dbagc.keys()) and not add:
dbagc[username] = user

return dbagc
@@ -34,8 +34,8 @@ def run_cmd(command):
return_code = 1

finally:
print('%s&&' % stdout.strip())
print('%s&&' % stderr.strip())
print('%s&&' % stdout.decode().strip())
print('%s&&' % stderr.decode().strip())
print('%s' % return_code)

@@ -28,17 +28,17 @@ def check_filesystem():
readOnly1 = bool(stat1.f_flag & ST_RDONLY)

if (readOnly1):
print "Read-only file system : monitor results (/root) file system is mounted as read-only"
print("Read-only file system : monitor results (/root) file system is mounted as read-only")
exit(1)

stat2 = os.statvfs('/var/cache/cloud')
readOnly2 = bool(stat2.f_flag & ST_RDONLY)

if (readOnly2):
print "Read-only file system : config info (/var/cache/cloud) file system is mounted as read-only"
print("Read-only file system : config info (/var/cache/cloud) file system is mounted as read-only")
exit(1)

print "file system is writable"
print("file system is writable")
exit(0)
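The .decode() calls added above reflect that subprocess pipes hand back bytes on Python 3, so the output must be decoded before it is formatted into log lines. A small illustrative sketch, not part of the commit:

    import subprocess

    # communicate() returns bytes when stdout/stderr are pipes
    p = subprocess.Popen("echo hello", shell=True,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()

    print(type(stdout))             # <class 'bytes'>
    print(stdout.decode().strip())  # 'hello' as str, ready for '%s' formatting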
@ -65,7 +65,7 @@ def zip_files(files):
|
|||
cleanup(files_from_shell_commands)
|
||||
generate_retrieved_files_txt(zf, files_found_list, files_not_found_list)
|
||||
zf.close()
|
||||
print zf_name
|
||||
print(zf_name)
|
||||
|
||||
|
||||
def get_cmd(script):
|
||||
|
|
@ -102,7 +102,7 @@ def execute_shell_script(script):
|
|||
p = sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE)
|
||||
stdout, stderr = p.communicate()
|
||||
return_code = p.returncode
|
||||
if return_code is 0:
|
||||
if return_code == 0:
|
||||
f.write(stdout)
|
||||
else:
|
||||
f.write(stderr)
|
||||
|
|
@ -129,9 +129,9 @@ def generate_retrieved_files_txt(zip_file, files_found, files_not_found):
|
|||
try:
|
||||
with open(output_file, 'wb', 0) as man:
|
||||
for i in files_found:
|
||||
man.write(i + '\n')
|
||||
man.write((i + '\n').encode())
|
||||
for j in files_not_found:
|
||||
man.write(j + 'File Not Found!!\n')
|
||||
man.write((j + ' File Not Found!!\n').encode())
|
||||
zip_file.write(output_file, output_file)
|
||||
finally:
|
||||
cleanup_cmd = "rm -f %s" % output_file
|
||||
|
|
|
|||
|
|
@ -158,7 +158,7 @@ class updateDataBag:
|
|||
dp['mtu'] = str(d['mtu'])
|
||||
qf = QueueFile()
|
||||
qf.load({'ip_address': [dp], 'type': 'ips'})
|
||||
if 'domain_name' not in d.keys() or d['domain_name'] == '':
|
||||
if 'domain_name' not in list(d.keys()) or d['domain_name'] == '':
|
||||
d['domain_name'] = "cloudnine.internal"
|
||||
return cs_guestnetwork.merge(dbag, d)
|
||||
|
||||
|
|
@ -227,7 +227,7 @@ class updateDataBag:
|
|||
def processCLItem(self, num, nw_type):
|
||||
key = 'eth' + num + 'ip'
|
||||
dp = {}
|
||||
if(key in self.qFile.data['cmd_line']):
|
||||
if key in self.qFile.data['cmd_line']:
|
||||
dp['public_ip'] = self.qFile.data['cmd_line'][key]
|
||||
dp['netmask'] = self.qFile.data['cmd_line']['eth' + num + 'mask']
|
||||
dp['source_nat'] = False
|
||||
|
|
@ -236,7 +236,7 @@ class updateDataBag:
|
|||
if nw_type == "public":
|
||||
dp['gateway'] = self.qFile.data['cmd_line']['gateway']
|
||||
else:
|
||||
if('localgw' in self.qFile.data['cmd_line']):
|
||||
if 'localgw' in self.qFile.data['cmd_line']:
|
||||
dp['gateway'] = self.qFile.data['cmd_line']['localgw']
|
||||
else:
|
||||
dp['gateway'] = ''
|
||||
|
|
@ -252,7 +252,7 @@ class updateDataBag:
|
|||
def process_ipaliases(self, dbag):
|
||||
nic_dev = None
|
||||
# Should be a way to deal with this better
|
||||
for intf, data in dbag.items():
|
||||
for intf, data in list(dbag.items()):
|
||||
if intf == 'id':
|
||||
continue
|
||||
elif any([net['nw_type'] == 'guest' for net in data]):
|
||||
|
|
|
|||
|
|
@ -31,10 +31,10 @@ import os
|
|||
import sys
|
||||
import syslog
|
||||
import threading
|
||||
import urlparse
|
||||
import urllib.parse
|
||||
|
||||
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
|
||||
from SocketServer import ThreadingMixIn #, ForkingMixIn
|
||||
from http.server import BaseHTTPRequestHandler, HTTPServer
|
||||
from socketserver import ThreadingMixIn #, ForkingMixIn
|
||||
|
||||
|
||||
passMap = {}
|
||||
|
|
@ -55,7 +55,7 @@ def initToken():
|
|||
with open(getTokenFile(), 'r') as f:
|
||||
secureToken = f.read()
|
||||
if not secureToken:
|
||||
secureToken = binascii.hexlify(os.urandom(16))
|
||||
secureToken = binascii.hexlify(os.urandom(16)).decode()
|
||||
with open(getTokenFile(), 'w') as f:
|
||||
f.write(secureToken)
|
||||
|
||||
|
|
@ -64,7 +64,7 @@ def checkToken(token):
|
|||
|
||||
def loadPasswordFile():
|
||||
try:
|
||||
with file(getPasswordFile()) as f:
|
||||
with open(getPasswordFile()) as f:
|
||||
for line in f:
|
||||
if '=' not in line: continue
|
||||
key, value = line.strip().split('=', 1)
|
||||
|
|
@ -75,11 +75,11 @@ def loadPasswordFile():
|
|||
def savePasswordFile():
|
||||
with lock:
|
||||
try:
|
||||
with file(getPasswordFile(), 'w') as f:
|
||||
with open(getPasswordFile(), 'w') as f:
|
||||
for ip in passMap:
|
||||
f.write('%s=%s\n' % (ip, passMap[ip]))
|
||||
f.close()
|
||||
except IOError, e:
|
||||
except IOError as e:
|
||||
syslog.syslog('serve_password: Unable to save to password file %s' % e)
|
||||
|
||||
def getPassword(ip):
|
||||
|
|
@ -117,7 +117,7 @@ class PasswordRequestHandler(BaseHTTPRequestHandler):
|
|||
self.wfile.write('saved_password')
|
||||
syslog.syslog('serve_password: requested password not found for %s' % clientAddress)
|
||||
else:
|
||||
self.wfile.write(password)
|
||||
self.wfile.write(password.encode())
|
||||
syslog.syslog('serve_password: password sent to %s' % clientAddress)
|
||||
elif requestType == 'saved_password':
|
||||
removePassword(clientAddress)
|
||||
|
|
@ -192,7 +192,7 @@ def serve(HandlerClass = PasswordRequestHandler,
|
|||
except KeyboardInterrupt:
|
||||
syslog.syslog('serve_password shutting down')
|
||||
passwordServer.socket.close()
|
||||
except Exception, e:
|
||||
except Exception as e:
|
||||
syslog.syslog('serve_password hit exception %s -- died' % e)
|
||||
passwordServer.socket.close()
|
||||
|
||||
|
|
|
|||
|
|
@ -72,3 +72,4 @@ setup_k8s_node() {
|
|||
}
|
||||
|
||||
setup_k8s_node
|
||||
. /opt/cloud/bin/setup/patch.sh && patch_sshd_config
|
||||
|
|
|
|||
|
|
@ -45,3 +45,5 @@ setup_console_proxy() {
|
|||
}
|
||||
|
||||
setup_console_proxy
|
||||
# System VMs are patched during bootstrap
|
||||
. /opt/cloud/bin/setup/patch.sh && patch_system_vm
|
||||
|
|
|
|||
|
|
@ -52,3 +52,4 @@ then
|
|||
exit 1
|
||||
fi
|
||||
setup_dhcpsrvr
|
||||
. /opt/cloud/bin/setup/patch.sh && patch_router
|
||||
|
|
|
|||
|
|
@ -41,3 +41,4 @@ then
|
|||
exit 1
|
||||
fi
|
||||
setup_elbvm
|
||||
. /opt/cloud/bin/setup/patch.sh && patch_router
|
||||
|
|
|
|||
|
|
@ -44,3 +44,4 @@ then
|
|||
exit 1
|
||||
fi
|
||||
setup_ilbvm
|
||||
. /opt/cloud/bin/setup/patch.sh && patch_router
|
||||
|
|
|
|||
|
|
@@ -0,0 +1,128 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
PATH="/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin"

log_it() {
echo "$(date) $@" >> /var/log/cloud.log
}

patch_sshd_config() {
if `! ssh -Q PubkeyAcceptedAlgorithms >/dev/null 2>&1` && `grep ^PubkeyAcceptedAlgorithms /etc/ssh/sshd_config >/dev/null`; then
# "PubkeyAcceptedAlgorithms=+ssh-rsa" is added to /etc/ssh/sshd_config in 4.20.0 systemvm template
# However, it is not supported in old systemvm templates
# If the system vm is created from an old systemvm template, remove it from /etc/ssh/sshd_config
# No need to restart ssh if it is running well
log_it "Removing PubkeyAcceptedAlgorithms=+ssh-rsa from /etc/ssh/sshd_config as it is not supported"
sed -i "/PubkeyAcceptedAlgorithms=+ssh-rsa/d" /etc/ssh/sshd_config
if ! systemctl is-active ssh > /dev/null; then
systemctl restart ssh
fi
elif `ssh -Q PubkeyAcceptedAlgorithms >/dev/null 2>&1` && `! grep ^PubkeyAcceptedAlgorithms /etc/ssh/sshd_config >/dev/null`; then
log_it "Adding PubkeyAcceptedAlgorithms=+ssh-rsa to sshd_config"
sed -i "/PubkeyAuthentication yes/aPubkeyAcceptedAlgorithms=+ssh-rsa" /etc/ssh/sshd_config
systemctl restart ssh
fi
}

patch_router() {
local patchfile="/var/cache/cloud/agent.zip"
local logfile="/var/log/patchrouter.log"
rm /usr/local/cloud/systemvm -rf
mkdir -p /usr/local/cloud/systemvm
ls -lrt $patchfile

log_it "Unzipping $patchfile"
echo "All" | unzip $patchfile -d /usr/local/cloud/systemvm >>$logfile 2>&1

find /usr/local/cloud/systemvm/ -name \*.sh | xargs chmod 555

patch_sshd_config
install_packages
}

patch_system_vm() {
patch_sshd_config
install_packages
}

install_packages() {
PACKAGES_FOLDER="/usr/local/cloud/systemvm/packages"
PACKAGES_INI="$PACKAGES_FOLDER/packages.ini"
declare -A package_properties
if [ -d $PACKAGES_FOLDER ] && [ -f $PACKAGES_INI ]; then
while read -r line; do
if [[ "$line" =~ ^(\[)(.*)(\])$ ]]; then
install_package
package_properties=
else
key=$(echo $line | cut -d '=' -f1)
value=$(echo $line | cut -d '=' -f2)
if [ "$key" != "" ]; then
package_properties[$key]=$value
fi
fi
done <$PACKAGES_INI
fi
export DEBIAN_FRONTEND=noninteractive
install_package
}

install_package() {
local os=${package_properties["debian_os"]}
if [ "$os" == "" ]; then
return
fi
local DEBIAN_RELEASE=$(lsb_release -rs)
if [ "$os" != "$DEBIAN_RELEASE" ]; then
log_it "Skipped the installation of package $package on Debian $DEBIAN_RELEASE as it can only be installed on Debian $os."
return
fi

local package=${package_properties["package_name"]}
local file=${package_properties["file_name"]}
if [ -z "$package" ] || [ -z "$file" ]; then
log_it "Skipped the installation due to empty package or file name (package name: $package, file name: $file)."
return
fi

dpkg-query -s $package >/dev/null 2>&1
if [ $? -eq 0 ]; then
log_it "Skipped the installation as package $package has already been installed."
return
fi

local conflicts=${package_properties["conflicted_packages"]}
if [ "$conflicts" != "" ]; then
log_it "Removing conflicted packages \"$conflicts\" before installing package $package"
apt remove -y "$conflicts"
if [ $? -eq 0 ]; then
log_it "Removed conflicted package(s) \"$conflicts\" before installing package $package"
else
log_it "Failed to remove conflicted package(s) \"$conflicts\" before installing package $package"
fi
fi

PACKAGES_FOLDER="/usr/local/cloud/systemvm/packages"
log_it "Installing package $package from file $PACKAGES_FOLDER/$file"
dpkg -i $PACKAGES_FOLDER/$file
if [ $? -eq 0 ]; then
log_it "Installed package $package from file $PACKAGES_FOLDER/$file"
else
log_it "Failed to install package $package from file $PACKAGES_FOLDER/$file"
fi
}
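For context, install_packages above reads /usr/local/cloud/systemvm/packages/packages.ini as one [section] per package with key=value properties and no spaces around the equals sign. An illustrative layout; the values are hypothetical and only the key names (debian_os, package_name, file_name, conflicted_packages) come from the script:

    [sample-package]
    debian_os=12
    package_name=sample-package
    file_name=sample-package_1.0.0_all.deb
    conflicted_packages=legacy-sample-package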
@ -101,3 +101,4 @@ then
|
|||
exit 1
|
||||
fi
|
||||
setup_router
|
||||
. /opt/cloud/bin/setup/patch.sh && patch_router
|
||||
|
|
|
|||
|
|
@ -87,3 +87,5 @@ HTTP
|
|||
}
|
||||
|
||||
setup_secstorage
|
||||
# System VMs are patched during bootstrap
|
||||
. /opt/cloud/bin/setup/patch.sh && patch_system_vm
|
||||
|
|
|
|||
|
|
@ -129,3 +129,4 @@ then
|
|||
exit 1
|
||||
fi
|
||||
setup_vpcrouter
|
||||
. /opt/cloud/bin/setup/patch.sh && patch_router
|
||||
|
|
|
|||
|
|
@@ -62,7 +62,7 @@ def is_guestnet_configured(guestnet_dict, keys):
existing_keys = []
new_eth_key = None

for k1, v1 in guestnet_dict.iteritems():
for k1, v1 in guestnet_dict.items():
if k1 in keys and len(v1) > 0:
existing_keys.append(k1)
@ -31,7 +31,7 @@ def main(argv):
|
|||
try:
|
||||
opts, args = getopt.getopt(argv, "f:d:")
|
||||
except getopt.GetoptError:
|
||||
print 'params: -f <filename> -d <b64jsondata>'
|
||||
print('params: -f <filename> -d <b64jsondata>')
|
||||
sys.exit(2)
|
||||
for opt, arg in opts:
|
||||
if opt == '-f':
|
||||
|
|
@ -46,7 +46,7 @@ def main(argv):
|
|||
elif b64data != '':
|
||||
json_data = json.loads(base64.b64decode(b64data))
|
||||
else:
|
||||
print '-f <filename> or -d <b64jsondata> required'
|
||||
print('-f <filename> or -d <b64jsondata> required')
|
||||
sys.exit(2)
|
||||
|
||||
for ip in json_data:
|
||||
|
|
@ -94,20 +94,23 @@ def createfile(ip, folder, file, data):
|
|||
fh = open(dest, "w")
|
||||
exflock(fh)
|
||||
if data is not None:
|
||||
fh.write(data)
|
||||
if isinstance(data, str):
|
||||
fh.write(data)
|
||||
elif isinstance(data, bytes):
|
||||
fh.write(data.decode())
|
||||
else:
|
||||
fh.write("")
|
||||
unflock(fh)
|
||||
fh.close()
|
||||
os.chmod(dest, 0644)
|
||||
os.chmod(dest, 0o644)
|
||||
|
||||
if folder == "metadata" or folder == "meta-data":
|
||||
try:
|
||||
os.makedirs(metamanifestdir, 0755)
|
||||
os.makedirs(metamanifestdir, 0o755)
|
||||
except OSError as e:
|
||||
# error 17 is already exists, we do it this way for concurrency
|
||||
if e.errno != 17:
|
||||
print "failed to make directories " + metamanifestdir + " due to :" + e.strerror
|
||||
print("failed to make directories " + metamanifestdir + " due to :" + e.strerror)
|
||||
sys.exit(1)
|
||||
if os.path.exists(metamanifest):
|
||||
fh = open(metamanifest, "r+a")
|
||||
|
|
@ -124,7 +127,7 @@ def createfile(ip, folder, file, data):
|
|||
fh.close()
|
||||
|
||||
if os.path.exists(metamanifest):
|
||||
os.chmod(metamanifest, 0644)
|
||||
os.chmod(metamanifest, 0o644)
|
||||
|
||||
|
||||
def htaccess(ip, folder, file):
|
||||
|
|
@ -133,11 +136,11 @@ def htaccess(ip, folder, file):
|
|||
htaccessFile = htaccessFolder+"/.htaccess"
|
||||
|
||||
try:
|
||||
os.makedirs(htaccessFolder, 0755)
|
||||
os.makedirs(htaccessFolder, 0o755)
|
||||
except OSError as e:
|
||||
# error 17 is already exists, we do it this way for sake of concurrency
|
||||
if e.errno != 17:
|
||||
print "failed to make directories " + htaccessFolder + " due to :" + e.strerror
|
||||
print("failed to make directories " + htaccessFolder + " due to :" + e.strerror)
|
||||
sys.exit(1)
|
||||
|
||||
fh = open(htaccessFile, "w")
|
||||
|
|
@ -151,7 +154,7 @@ def exflock(file):
|
|||
try:
|
||||
flock(file, LOCK_EX)
|
||||
except IOError as e:
|
||||
print "failed to lock file" + file.name + " due to : " + e.strerror
|
||||
print("failed to lock file" + file.name + " due to : " + e.strerror)
|
||||
sys.exit(1)
|
||||
return True
|
||||
|
||||
|
|
@ -160,7 +163,7 @@ def unflock(file):
|
|||
try:
|
||||
flock(file, LOCK_UN)
|
||||
except IOError as e:
|
||||
print "failed to unlock file" + file.name + " due to : " + e.strerror
|
||||
print("failed to unlock file" + file.name + " due to : " + e.strerror)
|
||||
sys.exit(1)
|
||||
return True
|
||||
|
||||
|
|
|
|||
|
|
@ -28,7 +28,7 @@ def main():
|
|||
data = entries[0]
|
||||
|
||||
if "maxCpuUsage" not in data:
|
||||
print "Missing maxCpuUsage in health_checks_data systemThresholds, skipping"
|
||||
print("Missing maxCpuUsage in health_checks_data systemThresholds, skipping")
|
||||
exit(0)
|
||||
|
||||
maxCpuUsage = float(data["maxCpuUsage"])
|
||||
|
|
@ -38,16 +38,16 @@ def main():
|
|||
"sub(\"%\", \"\", idle); printf \"%.2f\", 100 - idle }'"
|
||||
pout = Popen(cmd, shell=True, stdout=PIPE)
|
||||
if pout.wait() == 0:
|
||||
currentUsage = float(pout.communicate()[0].strip())
|
||||
currentUsage = float(pout.communicate()[0].decode().strip())
|
||||
if currentUsage > maxCpuUsage:
|
||||
print "CPU Usage " + str(currentUsage) + \
|
||||
"% has crossed threshold of " + str(maxCpuUsage) + "%"
|
||||
print("CPU Usage " + str(currentUsage) +
|
||||
"% has crossed threshold of " + str(maxCpuUsage) + "%")
|
||||
exit(1)
|
||||
print "CPU Usage within limits with current at " \
|
||||
+ str(currentUsage) + "%"
|
||||
print("CPU Usage within limits with current at "
|
||||
+ str(currentUsage) + "%")
|
||||
exit(0)
|
||||
else:
|
||||
print "Failed to retrieve cpu usage using " + cmd
|
||||
print("Failed to retrieve cpu usage using " + cmd)
|
||||
exit(1)
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -24,7 +24,7 @@ def main():
|
|||
vMs = getHealthChecksData("virtualMachines")
|
||||
|
||||
if vMs is None or len(vMs) == 0:
|
||||
print "No VMs running data available, skipping"
|
||||
print("No VMs running data available, skipping")
|
||||
exit(0)
|
||||
|
||||
try:
|
||||
|
|
@ -64,10 +64,10 @@ def main():
|
|||
failureMessage = failureMessage + entry + ", "
|
||||
|
||||
if failedCheck:
|
||||
print failureMessage[:-2]
|
||||
print(failureMessage[:-2])
|
||||
exit(1)
|
||||
else:
|
||||
print "All " + str(COUNT) + " VMs are present in dhcphosts.txt"
|
||||
print("All " + str(COUNT) + " VMs are present in dhcphosts.txt")
|
||||
exit(0)
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -27,7 +27,7 @@ def main():
|
|||
data = entries[0]
|
||||
|
||||
if "minDiskNeeded" not in data:
|
||||
print "Missing minDiskNeeded in health_checks_data systemThresholds, skipping"
|
||||
print("Missing minDiskNeeded in health_checks_data systemThresholds, skipping")
|
||||
exit(0)
|
||||
|
||||
minDiskNeeded = float(data["minDiskNeeded"]) * 1024
|
||||
|
|
@ -35,10 +35,10 @@ def main():
|
|||
freeSpace = (s.f_bavail * s.f_frsize) / 1024
|
||||
|
||||
if (freeSpace < minDiskNeeded):
|
||||
print "Insufficient free space is " + str(freeSpace/1024) + " MB"
|
||||
print("Insufficient free space is " + str(freeSpace/1024) + " MB")
|
||||
exit(1)
|
||||
else:
|
||||
print "Sufficient free space is " + str(freeSpace/1024) + " MB"
|
||||
print("Sufficient free space is " + str(freeSpace/1024) + " MB")
|
||||
exit(0)
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -24,7 +24,7 @@ def main():
|
|||
vMs = getHealthChecksData("virtualMachines")
|
||||
|
||||
if vMs is None or len(vMs) == 0:
|
||||
print "No VMs running data available, skipping"
|
||||
print("No VMs running data available, skipping")
|
||||
exit(0)
|
||||
|
||||
with open('/etc/hosts', 'r') as hostsFile:
|
||||
|
|
@ -51,10 +51,10 @@ def main():
|
|||
failureMessage = failureMessage + vM["ip"] + " " + vM["vmName"] + ", "
|
||||
|
||||
if failedCheck:
|
||||
print failureMessage[:-2]
|
||||
print(failureMessage[:-2])
|
||||
exit(1)
|
||||
else:
|
||||
print "All " + str(COUNT) + " VMs are present in /etc/hosts"
|
||||
print("All " + str(COUNT) + " VMs are present in /etc/hosts")
|
||||
exit(0)
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -24,7 +24,7 @@ from utility import getHealthChecksData
|
|||
def main():
|
||||
gws = getHealthChecksData("gateways")
|
||||
if gws is None and len(gws) == 0:
|
||||
print "No gateways data available, skipping"
|
||||
print("No gateways data available, skipping")
|
||||
exit(0)
|
||||
|
||||
unreachableGateWays = []
|
||||
|
|
@ -44,11 +44,11 @@ def main():
|
|||
unreachableGateWays.append(gw)
|
||||
|
||||
if len(unreachableGateWays) == 0:
|
||||
print "All " + str(len(gwsList)) + " gateways are reachable via ping"
|
||||
print("All " + str(len(gwsList)) + " gateways are reachable via ping")
|
||||
exit(0)
|
||||
else:
|
||||
print "Unreachable gateways found-"
|
||||
print unreachableGateWays
|
||||
print("Unreachable gateways found-")
|
||||
print(unreachableGateWays)
|
||||
exit(1)
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -23,7 +23,7 @@ from utility import getHealthChecksData, formatPort
|
|||
def checkMaxconn(haproxyData, haCfgSections):
|
||||
if "maxconn" in haproxyData and "maxconn" in haCfgSections["global"]:
|
||||
if haproxyData["maxconn"] != haCfgSections["global"]["maxconn"][0].strip():
|
||||
print "global maxconn mismatch occurred"
|
||||
print("global maxconn mismatch occurred")
|
||||
return False
|
||||
|
||||
return True
|
||||
|
|
@ -38,26 +38,26 @@ def checkLoadBalance(haproxyData, haCfgSections):
|
|||
secName = "listen " + srcServer
|
||||
|
||||
if secName not in haCfgSections:
|
||||
print "Missing section for load balancing " + secName + "\n"
|
||||
print("Missing section for load balancing " + secName + "\n")
|
||||
correct = False
|
||||
else:
|
||||
cfgSection = haCfgSections[secName]
|
||||
if "server" in cfgSection:
|
||||
if lbSec["algorithm"] != cfgSection["balance"][0]:
|
||||
print "Incorrect balance method for " + secName + \
|
||||
"Expected : " + lbSec["algorithm"] + \
|
||||
" but found " + cfgSection["balance"][0] + "\n"
|
||||
print("Incorrect balance method for " + secName +
|
||||
"Expected : " + lbSec["algorithm"] +
|
||||
" but found " + cfgSection["balance"][0] + "\n")
|
||||
correct = False
|
||||
|
||||
bindStr = lbSec["sourceIp"] + ":" + formatPort(lbSec["sourcePortStart"], lbSec["sourcePortEnd"])
|
||||
if cfgSection["bind"][0] != bindStr:
|
||||
print "Incorrect bind string found. Expected " + bindStr + " but found " + cfgSection["bind"][0] + "."
|
||||
print("Incorrect bind string found. Expected " + bindStr + " but found " + cfgSection["bind"][0] + ".")
|
||||
correct = False
|
||||
|
||||
if (lbSec["sourcePortStart"] == "80" and lbSec["sourcePortEnd"] == "80" and lbSec["keepAliveEnabled"] == "false") \
|
||||
or (lbSec["stickiness"].find("AppCookie") != -1 or lbSec["stickiness"].find("LbCookie") != -1):
|
||||
if not ("mode" in cfgSection and cfgSection["mode"][0] == "http"):
|
||||
print "Expected HTTP mode but not found"
|
||||
print("Expected HTTP mode but not found")
|
||||
correct = False
|
||||
|
||||
expectedServerIps = lbSec["vmIps"].split(" ")
|
||||
|
|
@ -74,7 +74,7 @@ def checkLoadBalance(haproxyData, haCfgSections):
|
|||
|
||||
if not foundPattern:
|
||||
correct = False
|
||||
print "Missing load balancing for " + pattern + ". "
|
||||
print("Missing load balancing for " + pattern + ". ")
|
||||
|
||||
return correct
|
||||
|
||||
|
|
@ -86,7 +86,7 @@ def main():
|
|||
'''
|
||||
haproxyData = getHealthChecksData("haproxyData")
|
||||
if haproxyData is None or len(haproxyData) == 0:
|
||||
print "No data provided to check, skipping"
|
||||
print("No data provided to check, skipping")
|
||||
exit(0)
|
||||
|
||||
with open("/etc/haproxy/haproxy.cfg", 'r') as haCfgFile:
|
||||
|
|
@ -94,7 +94,7 @@ def main():
|
|||
haCfgFile.close()
|
||||
|
||||
if len(haCfgLines) == 0:
|
||||
print "Unable to read config file /etc/haproxy/haproxy.cfg"
|
||||
print("Unable to read config file /etc/haproxy/haproxy.cfg")
|
||||
exit(1)
|
||||
|
||||
haCfgSections = {}
|
||||
|
|
@ -123,7 +123,7 @@ def main():
|
|||
checkLbRules = checkLoadBalance(haproxyData, haCfgSections)
|
||||
|
||||
if checkMaxConn and checkLbRules:
|
||||
print "All checks pass"
|
||||
print("All checks pass")
|
||||
exit(0)
|
||||
else:
|
||||
exit(1)
|
||||
|
|
|
|||
|
|
@ -24,7 +24,7 @@ from utility import getHealthChecksData, formatPort
|
|||
def main():
|
||||
portForwards = getHealthChecksData("portForwarding")
|
||||
if portForwards is None or len(portForwards) == 0:
|
||||
print "No portforwarding rules provided to check, skipping"
|
||||
print("No portforwarding rules provided to check, skipping")
|
||||
exit(0)
|
||||
|
||||
failedCheck = False
|
||||
|
|
@ -47,7 +47,7 @@ def main():
|
|||
"for fetching rules by " + fetchIpTableEntriesCmd + "\n"
|
||||
continue
|
||||
|
||||
ipTablesMatchingEntries = pout.communicate()[0].strip().split('\n')
|
||||
ipTablesMatchingEntries = pout.communicate()[0].decode().strip().split('\n')
|
||||
for pfEntryListExpected in entriesExpected:
|
||||
foundPfEntryList = False
|
||||
for ipTableEntry in ipTablesMatchingEntries:
|
||||
|
|
@ -68,10 +68,10 @@ def main():
|
|||
failureMessage = failureMessage + str(pfEntryListExpected) + "\n"
|
||||
|
||||
if failedCheck:
|
||||
print failureMessage
|
||||
print(failureMessage)
|
||||
exit(1)
|
||||
else:
|
||||
print "Found all entries (count " + str(len(portForwards)) + ") in iptables"
|
||||
print("Found all entries (count " + str(len(portForwards)) + ") in iptables")
|
||||
exit(0)
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -28,8 +28,8 @@ def main():
|
|||
data = entries[0]
|
||||
|
||||
if "maxMemoryUsage" not in data:
|
||||
print "Missing maxMemoryUsage in health_checks_data " + \
|
||||
"systemThresholds, skipping"
|
||||
print("Missing maxMemoryUsage in health_checks_data " +
|
||||
"systemThresholds, skipping")
|
||||
exit(0)
|
||||
|
||||
maxMemoryUsage = float(data["maxMemoryUsage"])
|
||||
|
|
@ -37,16 +37,16 @@ def main():
|
|||
pout = Popen(cmd, shell=True, stdout=PIPE)
|
||||
|
||||
if pout.wait() == 0:
|
||||
currentUsage = float(pout.communicate()[0].strip())
|
||||
currentUsage = float(pout.communicate()[0].decode().strip())
|
||||
if currentUsage > maxMemoryUsage:
|
||||
print "Memory Usage " + str(currentUsage) + \
|
||||
"% has crossed threshold of " + str(maxMemoryUsage) + "%"
|
||||
print("Memory Usage " + str(currentUsage) +
|
||||
"% has crossed threshold of " + str(maxMemoryUsage) + "%")
|
||||
exit(1)
|
||||
print "Memory Usage within limits with current at " + \
|
||||
str(currentUsage) + "%"
|
||||
print("Memory Usage within limits with current at " +
|
||||
str(currentUsage) + "%")
|
||||
exit(0)
|
||||
else:
|
||||
print "Failed to retrieve memory usage using " + cmd
|
||||
print("Failed to retrieve memory usage using " + cmd)
|
||||
exit(1)
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -41,7 +41,7 @@ def main():
|
|||
data = entries[0]
|
||||
|
||||
if len(data) == 0:
|
||||
print "Missing routerVersion in health_checks_data, skipping"
|
||||
print("Missing routerVersion in health_checks_data, skipping")
|
||||
exit(0)
|
||||
|
||||
templateVersionMatches = True
|
||||
|
|
@ -52,11 +52,11 @@ def main():
|
|||
releaseFile = "/etc/cloudstack-release"
|
||||
found = getFirstLine(releaseFile)
|
||||
if found is None:
|
||||
print "Release version not yet setup at " + releaseFile +\
|
||||
", skipping."
|
||||
print("Release version not yet setup at " + releaseFile +
|
||||
", skipping.")
|
||||
elif expected != found:
|
||||
print "Template Version mismatch. Expected: " + \
|
||||
expected + ", found: " + found
|
||||
print("Template Version mismatch. Expected: " +
|
||||
expected + ", found: " + found)
|
||||
templateVersionMatches = False
|
||||
|
||||
if "scriptsVersion" in data:
|
||||
|
|
@ -64,15 +64,15 @@ def main():
|
|||
sigFile = "/var/cache/cloud/cloud-scripts-signature"
|
||||
found = getFirstLine(sigFile)
|
||||
if found is None:
|
||||
print "Scripts signature is not yet setup at " + sigFile +\
|
||||
", skipping"
|
||||
print("Scripts signature is not yet setup at " + sigFile +
|
||||
", skipping")
|
||||
if expected != found:
|
||||
print "Scripts Version mismatch. Expected: " + \
|
||||
expected + ", found: " + found
|
||||
print("Scripts Version mismatch. Expected: " +
|
||||
expected + ", found: " + found)
|
||||
scriptVersionMatches = False
|
||||
|
||||
if templateVersionMatches and scriptVersionMatches:
|
||||
print "Template and scripts version match successful"
|
||||
print("Template and scripts version match successful")
|
||||
exit(0)
|
||||
else:
|
||||
exit(1)
|
||||
|
|
|
|||
|
|
@ -16,4 +16,4 @@
|
|||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from sharedFunctions import getHealthChecksData, formatPort
|
||||
from .sharedFunctions import getHealthChecksData, formatPort
|
||||
|
|
|
|||
|
|
@@ -16,7 +16,7 @@
# specific language governing permissions and limitations
# under the License.

from ConfigParser import SafeConfigParser
from configparser import ConfigParser
from subprocess import *
from datetime import datetime
import time

@@ -56,7 +56,7 @@ def getServicesConfig( config_file_path = "/etc/monitor.conf" ):

"""
process_dict = {}
parser = SafeConfigParser()
parser = ConfigParser()
parser.read( config_file_path )
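Python 3's configparser.ConfigParser is a drop-in replacement for the old SafeConfigParser here. A brief sketch of the parsing pattern; the section and key names below are hypothetical, since the real /etc/monitor.conf layout is not shown in this diff:

    from configparser import ConfigParser

    parser = ConfigParser()
    # Hypothetical contents; only the parsing pattern mirrors getServicesConfig()
    parser.read_string("""
    [ssh]
    processname=sshd
    servicename=ssh
    pidfile=/var/run/sshd.pid
    """)

    process_dict = {}
    for section in parser.sections():
        # items(section) returns (key, value) pairs for that section
        process_dict[section] = dict(parser.items(section))

    print(process_dict["ssh"]["pidfile"])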
@ -81,7 +81,7 @@ def printd (msg):
|
|||
f.seek(0, 2)
|
||||
f.write(str(msg)+"\n")
|
||||
f.close()
|
||||
print str(msg)
|
||||
print(str(msg))
|
||||
|
||||
def raisealert(severity, msg, process_name=None):
|
||||
""" Writes the alert message"""
|
||||
|
|
@ -96,7 +96,7 @@ def raisealert(severity, msg, process_name=None):
|
|||
logging.info(log)
|
||||
msg = 'logger -t monit '+ log
|
||||
pout = Popen(msg, shell=True, stdout=PIPE)
|
||||
print "[Alert] " + msg
|
||||
print("[Alert] " + msg)
|
||||
|
||||
|
||||
def isPidMatchPidFile(pidfile, pids):
|
||||
|
|
@ -148,7 +148,7 @@ def checkProcessRunningStatus(process_name, pidFile):
|
|||
#cmd = 'service ' + process_name + ' status'
|
||||
pout = Popen(cmd, shell=True, stdout=PIPE)
|
||||
exitStatus = pout.wait()
|
||||
temp_out = pout.communicate()[0]
|
||||
temp_out = pout.communicate()[0].decode()
|
||||
|
||||
#check there is only one pid or not
|
||||
if exitStatus == 0:
|
||||
|
|
@ -258,12 +258,12 @@ def monitProcess( processes_info ):
|
|||
printd("No config items provided - means a redundant VR or a VPC Router")
|
||||
return service_status, failing_services
|
||||
|
||||
print "[Process Info] " + json.dumps(processes_info)
|
||||
print("[Process Info] " + json.dumps(processes_info))
|
||||
|
||||
#time for noting process down time
|
||||
csec = repr(time.time()).split('.')[0]
|
||||
|
||||
for process,properties in processes_info.items():
|
||||
for process,properties in list(processes_info.items()):
|
||||
printd ("---------------------------\nchecking the service %s\n---------------------------- " %process)
|
||||
serviceName = process + ".service"
|
||||
processStatus, wasRestarted = checkProcessStatus(properties)
|
||||
|
|
@ -296,7 +296,7 @@ def execute(script, checkType = "basic"):
|
|||
|
||||
pout = Popen(cmd, shell=True, stdout=PIPE)
|
||||
exitStatus = pout.wait()
|
||||
output = pout.communicate()[0].strip()
|
||||
output = pout.communicate()[0].decode().strip()
|
||||
checkEndTime = time.time()
|
||||
|
||||
if exitStatus == 0:
|
||||
|
|
|
|||
|
|
@ -104,16 +104,18 @@ cleanup_systemVM() {
|
|||
rm -rf $backupfolder
|
||||
mv "$newpath"cloud-scripts.tgz /usr/share/cloud/cloud-scripts.tgz
|
||||
rm -rf "$newpath""agent.zip" "$newpath""patch-sysvms.sh"
|
||||
if [ "$TYPE" != "consoleproxy" ] && [ "$TYPE" != "secstorage" ]; then
|
||||
rm -rf /usr/local/cloud/systemvm/
|
||||
fi
|
||||
}
|
||||
|
||||
patch_systemvm() {
|
||||
rm -rf /usr/local/cloud/systemvm
|
||||
|
||||
if [ "$TYPE" == "consoleproxy" ] || [ "$TYPE" == "secstorage" ]; then
|
||||
echo "All" | unzip $newpath/agent.zip -d /usr/local/cloud/systemvm >> $logfile 2>&1
|
||||
mkdir -p /usr/local/cloud/systemvm
|
||||
find /usr/local/cloud/systemvm/ -name \*.sh | xargs chmod 555
|
||||
fi
|
||||
echo "All" | unzip $newpath/agent.zip -d /usr/local/cloud/systemvm >> $logfile 2>&1
|
||||
mkdir -p /usr/local/cloud/systemvm
|
||||
find /usr/local/cloud/systemvm/ -name \*.sh | xargs chmod 555
|
||||
|
||||
echo "Extracting cloud scripts" >> $logfile 2>&1
|
||||
tar -xvf $newpath/cloud-scripts.tgz -C / >> $logfile 2>&1
|
||||
|
||||
|
|
@ -124,6 +126,10 @@ patch_systemvm() {
|
|||
|
||||
update_checksum $newpath/cloud-scripts.tgz
|
||||
|
||||
if [ -f /opt/cloud/bin/setup/patch.sh ];then
|
||||
. /opt/cloud/bin/setup/patch.sh && patch_system_vm
|
||||
fi
|
||||
|
||||
if [ "$TYPE" == "consoleproxy" ] || [ "$TYPE" == "secstorage" ] || [[ "$TYPE" == *router ]]; then
|
||||
restart_services
|
||||
fi
|
||||
|
|
|
|||
|
|
@ -121,5 +121,12 @@
|
|||
<include>**/*</include>
|
||||
</includes>
|
||||
</fileSet>
|
||||
<fileSet>
|
||||
<directory>agent/packages</directory>
|
||||
<outputDirectory>packages</outputDirectory>
|
||||
<includes>
|
||||
<include>**/*</include>
|
||||
</includes>
|
||||
</fileSet>
|
||||
</fileSets>
|
||||
</assembly>
|
||||
|
|
|
|||
|
|
@ -0,0 +1,20 @@
|
|||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import os
|
||||
import sys
|
||||
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "debian/opt/cloud/bin"))
|
||||
|
|
@@ -45,5 +45,5 @@ then
fi

echo "Running systemvm/python unit tests"
nosetests2.7 .
nosetests3 .
exit $?
|||
|
|
@ -79,7 +79,7 @@ class TestDeployVmWithUserDataMultiNic(cloudstackTestCase):
|
|||
# Enable Network offering
|
||||
cls.network_offering_nouserdata.update(cls.api_client, state='Enabled')
|
||||
|
||||
# Create Network Offering with all the serices
|
||||
# Create Network Offering with all the services
|
||||
cls.network_offering_all = NetworkOffering.create(
|
||||
cls.api_client,
|
||||
cls.test_data["isolated_network_offering"]
|
||||
|
|
|
|||
|
|
@ -119,7 +119,7 @@ class TestNetworkMigration(cloudstackTestCase):
|
|||
cls.network_offering_nouserdata.update(cls.api_client,
|
||||
state='Enabled')
|
||||
|
||||
# Create Network Offering with all the serices
|
||||
# Create Network Offering with all the services
|
||||
cls.network_offering_all = NetworkOffering.create(
|
||||
cls.api_client,
|
||||
cls.test_data["isolated_network_offering"]
|
||||
|
|
|
|||
|
|
@ -697,15 +697,16 @@ class TestIpv6Network(cloudstackTestCase):
|
|||
"IPv6 firewall rule ICMP code mismatch %d, %d" % (rule.icmpcode, icmp_code))
|
||||
routerCmd = "nft list chain ip6 %s %s" % (FIREWALL_TABLE, FIREWALL_CHAINS[traffic_type])
|
||||
res = self.getRouterProcessStatus(self.getNetworkRouter(self.network), routerCmd)
|
||||
self.assertTrue(parsed_rule in res,
|
||||
"Listing firewall rule with nft list chain failure for rule: %s" % parsed_rule)
|
||||
parsed_rule_new = parsed_rule.replace("{ ", "").replace(" }", "")
|
||||
self.assertTrue(parsed_rule in res or parsed_rule_new in res,
|
||||
"Listing firewall rule with nft list chain failure for rule: '%s' is not in '%s'" % (parsed_rule, res))
|
||||
if delete == True:
|
||||
cmd = deleteIpv6FirewallRule.deleteIpv6FirewallRuleCmd()
|
||||
cmd.id = fw_rule.id
|
||||
self.userapiclient.deleteIpv6FirewallRule(cmd)
|
||||
res = self.getRouterProcessStatus(self.getNetworkRouter(self.network), routerCmd)
|
||||
self.assertFalse(parsed_rule in res,
|
||||
"Firewall rule present in nft list chain failure despite delete for rule: %s" % parsed_rule)
|
||||
self.assertFalse(parsed_rule in res or parsed_rule_new in res,
|
||||
"Firewall rule present in nft list chain failure despite delete for rule: '%s' is in '%s'" % (parsed_rule, res))
|
||||
|
||||
def checkIpv6FirewallRule(self):
|
||||
traffic_type = "Ingress"
|
||||
|
|
|
|||
|
|
@ -22,7 +22,8 @@ from marvin.cloudstackTestCase import cloudstackTestCase
|
|||
from marvin.cloudstackAPI import (stopRouter,
|
||||
restartNetwork,
|
||||
startRouter,
|
||||
rebootRouter)
|
||||
rebootRouter,
|
||||
getRouterHealthCheckResults)
|
||||
from marvin.lib.utils import (cleanup_resources,
|
||||
get_process_status,
|
||||
get_host_credentials)
|
||||
|
|
@ -303,7 +304,81 @@ class TestRouterServices(cloudstackTestCase):
|
|||
"Check haproxy service is running or not"
|
||||
)
|
||||
self.debug("Haproxy process status: %s" % res)
|
||||
return
|
||||
|
||||
routers = list_routers(
|
||||
self.apiclient,
|
||||
account=self.account.name,
|
||||
domainid=self.account.domainid,
|
||||
fetchhealthcheckresults=True
|
||||
)
|
||||
|
||||
self.assertEqual(isinstance(routers, list), True,
|
||||
"Check for list routers response return valid data"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(routers), 0,
|
||||
"Check list router response"
|
||||
)
|
||||
|
||||
router = routers[0]
|
||||
self.info("Router ID: %s & Router state: %s" % (
|
||||
router.id, router.state
|
||||
))
|
||||
|
||||
self.assertEqual(isinstance(router.healthcheckresults, list), True,
|
||||
"Router response should contain it's health check result as list"
|
||||
)
|
||||
|
||||
cmd = getRouterHealthCheckResults.getRouterHealthCheckResultsCmd()
|
||||
cmd.routerid = router.id
|
||||
cmd.performfreshchecks = True # Perform fresh checks as a newly created router may not have results
|
||||
healthData = self.apiclient.getRouterHealthCheckResults(cmd)
|
||||
self.info("Router ID: %s & Router state: %s" % (
|
||||
router.id, router.state
|
||||
))
|
||||
|
||||
self.assertEqual(router.id, healthData.routerid,
|
||||
"Router response should contain it's health check result so id should match"
|
||||
)
|
||||
self.assertEqual(isinstance(healthData.healthchecks, list), True,
|
||||
"Router response should contain it's health check result as list"
|
||||
)
|
||||
|
||||
self.verifyCheckTypes(healthData.healthchecks)
|
||||
self.verifyCheckNames(healthData.healthchecks)
|
||||
self.verifyCheckResults(healthData.healthchecks)
|
||||
|
||||
def verifyCheckTypes(self, healthChecks):
|
||||
for checkType in ["basic", "advanced"]:
|
||||
foundType = False
|
||||
for check in healthChecks:
|
||||
if check.checktype == checkType:
|
||||
foundType = True
|
||||
break
|
||||
self.assertTrue(foundType,
|
||||
"Router should contain health check results info for type: " + checkType
|
||||
)
|
||||
|
||||
def verifyCheckNames(self, healthChecks):
|
||||
for checkName in ["dns_check.py", "dhcp_check.py", "haproxy_check.py", "disk_space_check.py", "iptables_check.py", "gateways_check.py", "router_version_check.py"]:
|
||||
foundCheck = False
|
||||
for check in healthChecks:
|
||||
if check.checkname == checkName:
|
||||
foundCheck = True
|
||||
break
|
||||
self.assertTrue(foundCheck,
|
||||
"Router should contain health check results info for check name: " + checkName
|
||||
)
|
||||
|
||||
def verifyCheckResults(self, healthChecks):
|
||||
failedCheck = 0
|
||||
for check in healthChecks:
|
||||
if check.success:
|
||||
print("check %s is good" % check.checkname)
|
||||
else:
|
||||
print("check %s failed due to %s" % (check.checkname, check.details))
|
||||
failedCheck = failedCheck + 1
|
||||
self.assertEquals(failedCheck, 0)
|
||||
|
||||
@attr(
|
||||
tags=[
|
||||
|
|
|
|||
|
|
@ -287,8 +287,8 @@ class TestRedundantIsolateNetworks(cloudstackTestCase):
|
|||
)
|
||||
|
||||
expected = 1
|
||||
ssh_command = "wget -t 1 -T 5 www.google.com"
|
||||
check_string = "HTTP request sent, awaiting response... 200 OK"
|
||||
ssh_command = "curl -v -m 1 -o index.html -sL www.google.com"
|
||||
check_string = "200 OK"
|
||||
result = check_router_command(virtual_machine, nat_rule.ipaddress, ssh_command, check_string, self)
|
||||
|
||||
self.assertEqual(
|
||||
|
|
@ -307,8 +307,8 @@ class TestRedundantIsolateNetworks(cloudstackTestCase):
|
|||
)
|
||||
|
||||
expected = 0
|
||||
ssh_command = "wget -t 1 -T 1 www.google.com"
|
||||
check_string = "HTTP request sent, awaiting response... 200 OK"
|
||||
ssh_command = "curl -v -m 1 -o index.html -sL www.google.com"
|
||||
check_string = "200 OK"
|
||||
result = check_router_command(virtual_machine, nat_rule.ipaddress, ssh_command, check_string, self)
|
||||
|
||||
self.assertEqual(
|
||||
|
|
@ -451,8 +451,8 @@ class TestRedundantIsolateNetworks(cloudstackTestCase):
|
|||
)
|
||||
|
||||
expected = 0
|
||||
ssh_command = "wget -t 1 -T 1 www.google.com"
|
||||
check_string = "HTTP request sent, awaiting response... 200 OK"
|
||||
ssh_command = "curl -v -m 1 -o index.html -sL www.google.com"
|
||||
check_string = "200 OK"
|
||||
result = check_router_command(virtual_machine, nat_rule.ipaddress, ssh_command, check_string, self)
|
||||
|
||||
self.assertEqual(
|
||||
|
|
@ -480,8 +480,8 @@ class TestRedundantIsolateNetworks(cloudstackTestCase):
|
|||
)
|
||||
|
||||
expected = 1
|
||||
ssh_command = "wget -t 1 -T 5 www.google.com"
|
||||
check_string = "HTTP request sent, awaiting response... 200 OK"
|
||||
ssh_command = "curl -v -m 1 -o index.html -sL www.google.com"
|
||||
check_string = "200 OK"
|
||||
result = check_router_command(virtual_machine, nat_rule.ipaddress, ssh_command, check_string, self)
|
||||
|
||||
self.assertEqual(
|
||||
|
|
@ -840,8 +840,8 @@ class TestIsolatedNetworks(cloudstackTestCase):
|
|||
)
|
||||
|
||||
expected = 1
|
||||
ssh_command = "wget -t 1 -T 5 www.google.com"
|
||||
check_string = "HTTP request sent, awaiting response... 200 OK"
|
||||
ssh_command = "curl -v -m 1 -o index.html -sL www.google.com"
|
||||
check_string = "200 OK"
|
||||
result = check_router_command(virtual_machine, nat_rule.ipaddress, ssh_command, check_string, self)
|
||||
|
||||
self.assertEqual(
|
||||
|
|
@ -860,8 +860,8 @@ class TestIsolatedNetworks(cloudstackTestCase):
|
|||
)
|
||||
|
||||
expected = 0
|
||||
ssh_command = "wget -t 1 -T 1 www.google.com"
|
||||
check_string = "HTTP request sent, awaiting response... 200 OK"
|
||||
ssh_command = "curl -v -m 1 -o index.html -sL www.google.com"
|
||||
check_string = "200 OK"
|
||||
result = check_router_command(virtual_machine, nat_rule.ipaddress, ssh_command, check_string, self)
|
||||
|
||||
self.assertEqual(
|
||||
|
|
@ -995,8 +995,8 @@ class TestIsolatedNetworks(cloudstackTestCase):
|
|||
)
|
||||
|
||||
expected = 0
|
||||
ssh_command = "wget -t 1 -T 1 www.google.com"
|
||||
check_string = "HTTP request sent, awaiting response... 200 OK"
|
||||
ssh_command = "curl -v -m 1 -o index.html -sL www.google.com"
|
||||
check_string = "200 OK"
|
||||
result = check_router_command(virtual_machine, nat_rule.ipaddress, ssh_command, check_string, self)
|
||||
|
||||
self.assertEqual(
|
||||
|
|
@ -1015,8 +1015,8 @@ class TestIsolatedNetworks(cloudstackTestCase):
|
|||
)
|
||||
|
||||
expected = 1
|
||||
ssh_command = "wget -t 1 -T 5 www.google.com"
|
||||
check_string = "HTTP request sent, awaiting response... 200 OK"
|
||||
ssh_command = "curl -v -m 1 -o index.html -sL www.google.com"
|
||||
check_string = "200 OK"
|
||||
result = check_router_command(virtual_machine, nat_rule.ipaddress, ssh_command, check_string, self)
|
||||
|
||||
self.assertEqual(
|
||||
|
|
|
|||
|
|
@ -761,8 +761,9 @@ class TestIpv6Vpc(cloudstackTestCase):
|
|||
acl_chain = nic + ACL_CHAINS_SUFFIX[rule["traffictype"]]
|
||||
routerCmd = "nft list chain ip6 %s %s" % (ACL_TABLE, acl_chain)
|
||||
res = self.getRouterProcessStatus(router, routerCmd)
|
||||
self.assertTrue(rule["parsedrule"] in res,
|
||||
"Listing firewall rule with nft list chain failure for rule: %s" % rule["parsedrule"])
|
||||
parsed_rule_new = rule["parsedrule"].replace("{ ", "").replace(" }", "")
|
||||
self.assertTrue(rule["parsedrule"] in res or parsed_rule_new in res,
|
||||
"Listing firewall rule with nft list chain failure for rule: '%s' is not in '%s'" % (rule["parsedrule"], res))
|
||||
|
||||
def checkIpv6AclRule(self):
|
||||
router = self.getVpcRouter(self.vpc)
|
||||
|
|
|
|||
|
|
@ -592,7 +592,7 @@ class TestVpcSite2SiteVpn(cloudstackTestCase):
|
|||
time.sleep(20)
|
||||
|
||||
# setup ssh connection to vm2
|
||||
ssh_client = self._get_ssh_client(vm2, self.services, 10)
|
||||
ssh_client = self._get_ssh_client(vm2, self.services, 30)
|
||||
|
||||
if ssh_client:
|
||||
# run ping test
|
||||
|
|
|
|||
|
|
@ -1660,7 +1660,7 @@ under the License.
|
|||
</parameters>
|
||||
</command>
|
||||
|
||||
<!-- Test case 939 - verify that you can assign a load balancer to multipe vms -->
|
||||
<!-- Test case 939 - verify that you can assign a load balancer to multiple vms -->
|
||||
<command>
|
||||
<name>deployVirtualMachine</name>
|
||||
<testcase> [Deploy a Virtual Machine-1 to check multiple VMs - LB assignment]</testcase>
|
||||
|
|
|
|||
|
|
@ -66,7 +66,7 @@ d-i partman-auto/expert_recipe string \
|
|||
use_filesystem{ } filesystem{ ext2 } \
|
||||
mountpoint{ /boot } \
|
||||
. \
|
||||
256 1000 256 linux-swap \
|
||||
512 1000 512 linux-swap \
|
||||
method{ swap } format{ } \
|
||||
. \
|
||||
2240 40 4000 ext4 \
|
||||
|
|
|
|||
|
|
@@ -36,8 +36,8 @@ function add_backports() {
sed -i '/deb-src/d' /etc/apt/sources.list
sed -i '/backports/d' /etc/apt/sources.list
sed -i '/security/d' /etc/apt/sources.list
echo 'deb http://http.debian.net/debian bullseye-backports main' >> /etc/apt/sources.list
echo 'deb http://security.debian.org/debian-security bullseye-security main' >> /etc/apt/sources.list
echo 'deb http://http.debian.net/debian bookworm-backports main' >> /etc/apt/sources.list
echo 'deb http://security.debian.org/debian-security bookworm-security main' >> /etc/apt/sources.list
}

function apt_upgrade() {